language: string (6 distinct values)
original_string: string, 25 to 887k characters
text: string, 25 to 887k characters (mirrors original_string)
Python
def UploadFile(self, path, file_handle):
  """Uploads a file to the hosting service.

  Must only be called after Begin().
  The path provided must be one of those that were returned by Begin().

  Args:
    path: The path the file is being uploaded as.
    file_handle: A file-like object containing the data to upload.

  Raises:
    KeyError: The provided file is not amongst those to be uploaded.
  """
  assert self.in_transaction, 'Begin() must be called before UploadFile().'
  if path not in self.files:
    raise KeyError('File \'%s\' is not in the list of files to be uploaded.'
                   % path)

  del self.files[path]

  file_classification = FileClassification(self.config, path)
  payload = file_handle.read()
  if file_classification.IsStaticFile():
    self.blob_batcher.AddToBatch(path, payload,
                                 file_classification.StaticMimeType())
  if file_classification.IsErrorFile():
    self.errorblob_batcher.AddToBatch(file_classification.ErrorCode(),
                                      payload,
                                      file_classification.ErrorMimeType())
  if file_classification.IsApplicationFile():
    self.file_batcher.AddToBatch(path, payload, None)
Python
def Commit(self):
  """Commits the transaction, making the new app version available.

  All the files returned by Begin() must have been uploaded with UploadFile()
  before Commit() can be called.

  This tries the new 'deploy' method; if that fails it uses the old 'commit'.

  Returns:
    An appinfo.AppInfoSummary if one was returned from the Deploy, None
    otherwise.

  Raises:
    Exception: Some required files were not uploaded.
  """
  assert self.in_transaction, 'Begin() must be called before Commit().'
  if self.files:
    raise Exception('Not all required files have been uploaded.')

  def PrintRetryMessage(_, delay):
    StatusUpdate('Will check again in %s seconds.' % delay)

  app_summary = None

  app_summary = self.Deploy()

  success, unused_contents = RetryWithBackoff(
      lambda: (self.IsReady(), None), PrintRetryMessage, 1, 2, 60, 20)
  if not success:
    logging.warning('Version still not ready to serve, aborting.')
    raise Exception('Version not ready.')

  result = self.StartServing()
  if not result:
    self.in_transaction = False
  else:
    success, unused_contents = RetryWithBackoff(
        lambda: (self.IsServing(), None), PrintRetryMessage, 1, 2, 60, 20)
    if not success:
      logging.warning('Version still not serving, aborting.')
      raise Exception('Version not ready.')
    self.in_transaction = False

  return app_summary
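Commit() relies on a RetryWithBackoff helper that repeatedly polls IsReady()/IsServing(). That helper is not included in these rows; the following is a minimal, hypothetical sketch inferred purely from the call sites above (a callable returning a (done, value) pair, a retry-notify callback, then initial delay, backoff factor, max delay, and max tries), not the SDK's actual implementation.

import time

def RetryWithBackoff(callable_func, retry_notify_func,
                     initial_delay, backoff_factor, max_delay, max_tries):
  # Hypothetical sketch: poll callable_func until it reports success or the
  # retry budget is exhausted.  callable_func returns a (done, value) pair.
  delay = initial_delay
  for _ in range(max_tries):
    done, value = callable_func()
    if done:
      return True, value
    retry_notify_func(value, delay)   # e.g. PrintRetryMessage above
    time.sleep(delay)
    delay = min(delay * backoff_factor, max_delay)
  return False, None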
Python
def IsServing(self):
  """Check if the new app version is serving.

  Raises:
    Exception: Deploy has not yet been called.

  Returns:
    True if the deployed app version is serving.
  """
  assert self.started, 'StartServing() must be called before IsServing().'

  StatusUpdate('Checking if updated app version is serving.')
  result = self.Send('/api/appversion/isserving')
  return result == '1'
Python
def DoUpload(self, paths, openfunc):
  """Uploads a new appversion with the given config and files to the server.

  Args:
    paths: An iterator that yields the relative paths of the files to upload.
    openfunc: A function that takes a path and returns a file-like object.

  Returns:
    An appinfo.AppInfoSummary if one was returned from the server, None
    otherwise.
  """
  logging.info('Reading app configuration.')

  StatusUpdate('\nStarting update of %s' % self.Describe())

  path = ''
  try:
    self.resource_limits = GetResourceLimits(self.rpcserver, self.config)

    StatusUpdate('Scanning files on local disk.')
    num_files = 0
    for path in paths:
      file_handle = openfunc(path)
      file_classification = FileClassification(self.config, path)
      try:
        file_length = GetFileLength(file_handle)
        if file_classification.IsApplicationFile():
          max_size = self.resource_limits['max_file_size']
        else:
          max_size = self.resource_limits['max_blob_size']
        if file_length > max_size:
          logging.error('Ignoring file \'%s\': Too long '
                        '(max %d bytes, file is %d bytes)',
                        path, max_size, file_length)
        else:
          logging.info('Processing file \'%s\'', path)
          self.AddFile(path, file_handle)
      finally:
        file_handle.close()
      num_files += 1
      if num_files % 500 == 0:
        StatusUpdate('Scanned %d files.' % num_files)
  except KeyboardInterrupt:
    logging.info('User interrupted. Aborting.')
    raise
  except EnvironmentError, e:
    logging.error('An error occurred processing file \'%s\': %s. Aborting.',
                  path, e)
    raise

  app_summary = None
  try:
    missing_files = self.Begin()
    if missing_files:
      StatusUpdate('Uploading %d files and blobs.' % len(missing_files))
      num_files = 0
      for missing_file in missing_files:
        file_handle = openfunc(missing_file)
        try:
          self.UploadFile(missing_file, file_handle)
        finally:
          file_handle.close()
        num_files += 1
        if num_files % 500 == 0:
          StatusUpdate('Processed %d out of %s.' %
                       (num_files, len(missing_files)))

      self.file_batcher.Flush()
      self.blob_batcher.Flush()
      self.errorblob_batcher.Flush()
      StatusUpdate('Uploaded %d files and blobs' % num_files)

    if (self.config.derived_file_type and
        appinfo.PYTHON_PRECOMPILED in self.config.derived_file_type):
      try:
        self.Precompile()
      except urllib2.HTTPError, e:
        ErrorUpdate('Error %d: --- begin server output ---\n'
                    '%s\n--- end server output ---' %
                    (e.code, e.read().rstrip('\n')))
        if e.code == 422 or self.config.runtime == 'go':
          raise
        print >>self.error_fh, (
            'Precompilation failed. Your app can still serve but may '
            'have reduced startup performance. You can retry the update '
            'later to retry the precompilation step.')

    app_summary = self.Commit()
    StatusUpdate('Completed update of %s' % self.Describe())
  except KeyboardInterrupt:
    logging.info('User interrupted. Aborting.')
    self.Rollback()
    raise
  except urllib2.HTTPError, err:
    logging.info('HTTP Error (%s)', err)
    self.Rollback()
    raise
  except:
    logging.exception('An unexpected error occurred. Aborting.')
    self.Rollback()
    raise

  logging.info('Done!')
  return app_summary
Python
def GetUserAgent(get_version=GetVersionObject,
                 get_platform=appengine_rpc.GetPlatformToken):
  """Determines the value of the 'User-agent' header to use for HTTP requests.

  If the 'APPCFG_SDK_NAME' environment variable is present, that will be
  used as the first product token in the user-agent.

  Args:
    get_version: Used for testing.
    get_platform: Used for testing.

  Returns:
    String containing the 'user-agent' header value, which includes the SDK
    version, the platform information, and the version of Python;
    e.g., 'appcfg_py/1.0.1 Darwin/9.2.0 Python/2.5.2'.
  """
  product_tokens = []

  sdk_name = os.environ.get('APPCFG_SDK_NAME')
  if sdk_name:
    product_tokens.append(sdk_name)
  else:
    version = get_version()
    if version is None:
      release = 'unknown'
    else:
      release = version['release']
    product_tokens.append('%s/%s' % (SDK_PRODUCT, release))

  product_tokens.append(get_platform())

  python_version = '.'.join(str(i) for i in sys.version_info)
  product_tokens.append('Python/%s' % python_version)

  return ' '.join(product_tokens)
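The two keyword arguments are test-injection hooks. A small usage sketch with hypothetical stub values; the real GetVersionObject and appengine_rpc.GetPlatformToken live elsewhere in the SDK, and SDK_PRODUCT is assumed to be the 'appcfg_py' token shown in the docstring.

# Hypothetical stubs for illustration only; the real hooks come from the SDK.
fake_version = lambda: {'release': '1.7.0'}
fake_platform = lambda: 'Linux/3.2.0'
user_agent = GetUserAgent(get_version=fake_version, get_platform=fake_platform)
# With APPCFG_SDK_NAME unset, user_agent looks something like
# 'appcfg_py/1.7.0 Linux/3.2.0 Python/2.7.3.final.0'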
Python
def Run(self):
  """Executes the requested action.

  Catches any HTTPErrors raised by the action and prints them to stderr.

  Returns:
    1 on error, 0 if successful.
  """
  try:
    self.action(self)
  except urllib2.HTTPError, e:
    body = e.read()
    if self.wrap_server_error_message:
      error_format = ('Error %d: --- begin server output ---\n'
                      '%s\n--- end server output ---')
    else:
      error_format = 'Error %d: %s'
    print >>self.error_fh, (error_format % (e.code, body.rstrip('\n')))
    return 1
  except yaml_errors.EventListenerError, e:
    print >>self.error_fh, ('Error parsing yaml file:\n%s' % e)
    return 1
  return 0
Python
def _GetOptionParser(self):
  """Creates an OptionParser with generic usage and description strings.

  Returns:
    An OptionParser instance.
  """

  class Formatter(optparse.IndentedHelpFormatter):
    """Custom help formatter that does not reformat the description."""

    def format_description(self, description):
      """Very simple formatter."""
      return description + '\n'

  desc = self._GetActionDescriptions()
  desc = ('Action must be one of:\n%s'
          'Use \'help <action>\' for a detailed description.') % desc

  parser = self.parser_class(usage='%prog [options] <action>',
                             description=desc,
                             formatter=Formatter(),
                             conflict_handler='resolve')

  parser.add_option('-h', '--help', action='store_true',
                    dest='help', help='Show the help message and exit.')
  parser.add_option('-q', '--quiet', action='store_const', const=0,
                    dest='verbose', help='Print errors only.')
  parser.add_option('-v', '--verbose', action='store_const', const=2,
                    dest='verbose', default=1,
                    help='Print info level logs.')
  parser.add_option('--noisy', action='store_const', const=3,
                    dest='verbose', help='Print all logs.')
  parser.add_option('-s', '--server', action='store', dest='server',
                    default='appengine.google.com',
                    metavar='SERVER', help='The App Engine server.')
  parser.add_option('--secure', action='store_true', dest='secure',
                    default=True, help=optparse.SUPPRESS_HELP)
  parser.add_option('--insecure', action='store_false', dest='secure',
                    help='Use HTTP when communicating with the server.')
  parser.add_option('-e', '--email', action='store', dest='email',
                    metavar='EMAIL', default=None,
                    help='The username to use. Will prompt if omitted.')
  parser.add_option('-H', '--host', action='store', dest='host',
                    metavar='HOST', default=None,
                    help='Overrides the Host header sent with all RPCs.')
  parser.add_option('--no_cookies', action='store_false',
                    dest='save_cookies', default=True,
                    help='Do not save authentication cookies to local disk.')
  parser.add_option('--skip_sdk_update_check', action='store_true',
                    dest='skip_sdk_update_check', default=False,
                    help='Do not check for SDK updates.')
  parser.add_option('--passin', action='store_true',
                    dest='passin', default=False,
                    help='Read the login password from stdin.')
  parser.add_option('-A', '--application', action='store', dest='app_id',
                    help='Override application from app.yaml file.')
  parser.add_option('-V', '--version', action='store', dest='version',
                    help='Override (major) version from app.yaml file.')
  parser.add_option('-r', '--runtime', action='store', dest='runtime',
                    help='Override runtime from app.yaml file.')
  parser.add_option('-R', '--allow_any_runtime', action='store_true',
                    dest='allow_any_runtime', default=False,
                    help='Do not validate the runtime in app.yaml')
  parser.add_option('--oauth2', action='store_true', dest='oauth2',
                    default=False,
                    help='Use OAuth2 instead of password auth.')
  parser.add_option('--oauth2_refresh_token', action='store',
                    dest='oauth2_refresh_token', default=None,
                    help='An existing OAuth2 refresh token to use. Will '
                    'not attempt interactive OAuth approval.')
  parser.add_option('--noauth_local_webserver', action='store_false',
                    dest='auth_local_webserver', default=True,
                    help='Do not run a local web server to handle redirects '
                    'during OAuth authorization.')
  return parser
Python
def _GetRpcServer(self):
  """Returns an instance of an AbstractRpcServer.

  Returns:
    A new AbstractRpcServer, on which RPC calls can be made.

  Raises:
    OAuthNotAvailable: OAuth is requested but the dependencies aren't
      imported.
  """

  def GetUserCredentials():
    """Prompts the user for a username and password."""
    email = self.options.email
    if email is None:
      email = self.raw_input_fn('Email: ')

    password_prompt = 'Password for %s: ' % email
    if self.options.passin:
      password = self.raw_input_fn(password_prompt)
    else:
      password = self.password_input_fn(password_prompt)

    return (email, password)

  StatusUpdate('Host: %s' % self.options.server)

  dev_appserver = self.options.host == 'localhost'
  if self.options.oauth2 and not dev_appserver:
    if not appengine_rpc_httplib2:
      raise OAuthNotAvailable()
    if not self.rpc_server_class:
      self.rpc_server_class = appengine_rpc_httplib2.HttpRpcServerOauth2

    get_user_credentials = self.options.oauth2_refresh_token

    source = (self.oauth_client_id,
              self.oauth_client_secret,
              self.oauth_scopes)

    appengine_rpc_httplib2.tools.FLAGS.auth_local_webserver = (
        self.options.auth_local_webserver)
  else:
    if not self.rpc_server_class:
      self.rpc_server_class = appengine_rpc.HttpRpcServerWithOAuth2Suggestion
    get_user_credentials = GetUserCredentials
    source = GetSourceName()

  if dev_appserver:
    email = self.options.email
    if email is None:
      email = 'test@example.com'
      logging.info('Using debug user %s. Override with --email', email)
    rpcserver = self.rpc_server_class(
        self.options.server,
        lambda: (email, 'password'),
        GetUserAgent(),
        source,
        host_override=self.options.host,
        save_cookies=self.options.save_cookies,
        secure=False)
    rpcserver.authenticated = True
    return rpcserver

  if self.options.passin:
    auth_tries = 1
  else:
    auth_tries = 3

  return self.rpc_server_class(self.options.server,
                               get_user_credentials,
                               GetUserAgent(),
                               source,
                               host_override=self.options.host,
                               save_cookies=self.options.save_cookies,
                               auth_tries=auth_tries,
                               account_type='HOSTED_OR_GOOGLE',
                               secure=self.options.secure)
Python
def _FindYaml(self, basepath, file_name):
  """Find yaml files in application directory.

  Args:
    basepath: Base application directory.
    file_name: Relative file path from basepath, without extension, to search
      for.

  Returns:
    Path to located yaml file if one exists, else None.
  """
  if not os.path.isdir(basepath):
    self.parser.error('Not a directory: %s' % basepath)

  alt_basepath = os.path.join(basepath, "WEB-INF", "appengine-generated")

  for yaml_basepath in (basepath, alt_basepath):
    for yaml_file in (file_name + '.yaml', file_name + '.yml'):
      yaml_path = os.path.join(yaml_basepath, yaml_file)
      if os.path.isfile(yaml_path):
        return yaml_path

  return None
Python
def DownloadApp(self):
  """Downloads the given app+version."""
  if len(self.args) != 1:
    self.parser.error('\"download_app\" expects one non-option argument, '
                      'found ' + str(len(self.args)) + '.')

  out_dir = self.args[0]

  app_id = self.options.app_id
  if app_id is None:
    self.parser.error('You must specify an app ID via -A or --application.')

  app_version = self.options.version

  if os.path.exists(out_dir):
    if not os.path.isdir(out_dir):
      self.parser.error('Cannot download to path "%s": '
                        'there\'s a file in the way.' % out_dir)
    elif os.listdir(out_dir):
      self.parser.error('Cannot download to path "%s": directory already '
                        'exists and it isn\'t empty.' % out_dir)

  rpcserver = self._GetRpcServer()

  DoDownloadApp(rpcserver, out_dir, app_id, app_version)
Python
def UpdateVersion(self, rpcserver, basepath, appyaml, backend=None):
  """Updates and deploys a new appversion.

  Args:
    rpcserver: An AbstractRpcServer instance on which RPC calls can be made.
    basepath: The root directory of the version to update.
    appyaml: The AppInfoExternal object parsed from an app.yaml-like file.
    backend: The name of the backend to update, if any.

  Returns:
    An appinfo.AppInfoSummary if one was returned from the Deploy, None
    otherwise.
  """
  if self.options.precompilation:
    if not appyaml.derived_file_type:
      appyaml.derived_file_type = []
    if appinfo.PYTHON_PRECOMPILED not in appyaml.derived_file_type:
      appyaml.derived_file_type.append(appinfo.PYTHON_PRECOMPILED)

  appversion = AppVersionUpload(rpcserver, appyaml, backend, self.error_fh)
  return appversion.DoUpload(
      self.file_iterator(basepath, appyaml.skip_files, appyaml.runtime),
      lambda path: self.opener(os.path.join(basepath, path), 'rb'))
Python
def Update(self):
  """Updates and deploys a new appversion and global app configs."""
  appyaml = None
  rpcserver = self._GetRpcServer()
  if os.path.isdir(self.basepath):
    appyaml = self._ParseAppInfoFromYaml(self.basepath)
    has_python25_version = appyaml.runtime == 'python'

    if self.options.skip_sdk_update_check:
      logging.info('Skipping update check')
    else:
      updatecheck = self.update_check_class(rpcserver, appyaml)
      updatecheck.CheckForUpdates()
    self.UpdateVersion(rpcserver, self.basepath, appyaml)
  else:
    all_files = [self.basepath] + self.args
    has_python25_version = False

    for yaml_path in all_files:
      file_name = os.path.basename(yaml_path)
      self.basepath = os.path.dirname(yaml_path)
      if not self.basepath:
        self.basepath = '.'
      server_yaml = self._ParseAppInfoFromYaml(self.basepath,
                                               os.path.splitext(file_name)[0])
      if server_yaml.runtime == 'python':
        has_python25_version = True

      if not server_yaml.server and file_name != 'app.yaml':
        ErrorUpdate("Error: 'server' parameter not specified in %s" %
                    yaml_path)
        continue
      self.UpdateVersion(rpcserver, self.basepath, server_yaml)

  if has_python25_version:
    MigratePython27Notice()

  if self.options.backends:
    self.BackendsUpdate()

  index_defs = self._ParseIndexYaml(self.basepath)
  if index_defs:
    index_upload = IndexDefinitionUpload(rpcserver, appyaml, index_defs)
    try:
      index_upload.DoUpload()
    except urllib2.HTTPError, e:
      ErrorUpdate('Error %d: --- begin server output ---\n'
                  '%s\n--- end server output ---' %
                  (e.code, e.read().rstrip('\n')))
      print >> self.error_fh, (
          'Your app was updated, but there was an error updating your '
          'indexes. Please retry later with appcfg.py update_indexes.')

  cron_yaml = self._ParseCronYaml(self.basepath)
  if cron_yaml:
    cron_upload = CronEntryUpload(rpcserver, appyaml, cron_yaml)
    cron_upload.DoUpload()

  queue_yaml = self._ParseQueueYaml(self.basepath)
  if queue_yaml:
    queue_upload = QueueEntryUpload(rpcserver, appyaml, queue_yaml)
    queue_upload.DoUpload()

  dos_yaml = self._ParseDosYaml(self.basepath)
  if dos_yaml:
    dos_upload = DosEntryUpload(rpcserver, appyaml, dos_yaml)
    dos_upload.DoUpload()

  if appyaml:
    pagespeed_upload = PagespeedEntryUpload(
        rpcserver, appyaml, appyaml.pagespeed)
    try:
      pagespeed_upload.DoUpload()
    except urllib2.HTTPError, e:
      ErrorUpdate('Error %d: --- begin server output ---\n'
                  '%s\n--- end server output ---' %
                  (e.code, e.read().rstrip('\n')))
      print >> self.error_fh, (
          'Your app was updated, but there was an error updating PageSpeed. '
          'Please try the update again later.')
Python
def UpdateCron(self):
  """Updates any new or changed cron definitions."""
  if self.args:
    self.parser.error('Expected a single <directory> argument.')

  appyaml = self._ParseAppInfoFromYaml(self.basepath)
  rpcserver = self._GetRpcServer()

  cron_yaml = self._ParseCronYaml(self.basepath)
  if cron_yaml:
    cron_upload = CronEntryUpload(rpcserver, appyaml, cron_yaml)
    cron_upload.DoUpload()
Python
def UpdateQueues(self):
  """Updates any new or changed task queue definitions."""
  if self.args:
    self.parser.error('Expected a single <directory> argument.')

  appyaml = self._ParseAppInfoFromYaml(self.basepath)
  rpcserver = self._GetRpcServer()

  queue_yaml = self._ParseQueueYaml(self.basepath)
  if queue_yaml:
    queue_upload = QueueEntryUpload(rpcserver, appyaml, queue_yaml)
    queue_upload.DoUpload()
Python
def UpdateDispatch(self):
  """Updates new or changed dispatch definitions."""
  if self.args:
    self.parser.error('Expected a single <directory> argument.')

  rpcserver = self._GetRpcServer()

  dispatch_yaml = self._ParseDispatchYaml(self.basepath)
  if dispatch_yaml:
    if self.options.app_id:
      dispatch_yaml.application = self.options.app_id
    if not dispatch_yaml.application:
      self.parser.error('Expected -A app_id when dispatch.yaml.application'
                        ' is not set.')
    StatusUpdate('Uploading dispatch entries.')
    rpcserver.Send('/api/dispatch/update',
                   app_id=dispatch_yaml.application,
                   payload=dispatch_yaml.ToYAML())
Python
def UpdateDos(self):
  """Updates any new or changed dos definitions."""
  if self.args:
    self.parser.error('Expected a single <directory> argument.')

  appyaml = self._ParseAppInfoFromYaml(self.basepath)
  rpcserver = self._GetRpcServer()

  dos_yaml = self._ParseDosYaml(self.basepath)
  if dos_yaml:
    dos_upload = DosEntryUpload(rpcserver, appyaml, dos_yaml)
    dos_upload.DoUpload()
Python
def BackendsConfigure(self):
  """Changes the configuration of an existing backend."""
  if len(self.args) != 1:
    self.parser.error('Expected a single <backend> argument.')

  backend = self.args[0]
  appyaml = self._ParseAppInfoFromYaml(self.basepath)
  backends_yaml = self._ParseBackendsYaml(self.basepath)
  rpcserver = self._GetRpcServer()
  response = rpcserver.Send('/api/backends/configure',
                            app_id=appyaml.application,
                            backend=backend,
                            payload=backends_yaml.ToYAML())
  print >> self.out_fh, response
Python
def Rollback(self):
  """Does a rollback of an existing transaction for this app version."""
  if self.args:
    self.parser.error('Expected a single <directory> argument.')
  self._Rollback()
Python
def _Rollback(self, backend=None):
  """Does a rollback of an existing transaction.

  Args:
    backend: name of a backend to rollback, or None

  If a backend is specified the rollback will affect only that backend, if no
  backend is specified the rollback will affect the current app version.
  """
  appyaml = self._ParseAppInfoFromYaml(self.basepath)
  appversion = AppVersionUpload(self._GetRpcServer(), appyaml, backend)
  appversion.in_transaction = True
  appversion.Rollback()
Python
def RequestLogs(self):
  """Write request logs to a file."""
  if len(self.args) != 1:
    self.parser.error(
        'Expected a <directory> argument and an <output_file> argument.')
  if (self.options.severity is not None and
      not 0 <= self.options.severity <= MAX_LOG_LEVEL):
    self.parser.error(
        'Severity range is 0 (DEBUG) through %s (CRITICAL).' % MAX_LOG_LEVEL)

  if self.options.num_days is None:
    self.options.num_days = int(not self.options.append)

  try:
    end_date = self._ParseEndDate(self.options.end_date)
  except (TypeError, ValueError):
    self.parser.error('End date must be in the format YYYY-MM-DD.')

  rpcserver = self._GetRpcServer()
  appyaml = self._ParseAppInfoFromYaml(self.basepath)
  logs_requester = LogsRequester(rpcserver, appyaml, self.args[0],
                                 self.options.num_days,
                                 self.options.append,
                                 self.options.severity,
                                 end_date,
                                 self.options.vhost,
                                 self.options.include_vhost,
                                 self.options.include_all,
                                 time_func=self.time_func)
  logs_requester.DownloadLogs()
Python
def MakeSyncCall(self, service, call, request, response, request_id=None):
  """The main RPC entry point.

  Args:
    service: Must be the name as provided to service_name of constructor.
    call: A string representing the rpc to make.  Must be part of
      the underlying services methods and implemented by _Dynamic_<call>.
    request: A protocol buffer of the type corresponding to 'call'.
    response: A protocol buffer of the type corresponding to 'call'.
    request_id: A unique string identifying the request associated with the
      API call.
  """
  assert service == self.__service_name, ('Expected "%s" service name, '
                                          'was "%s"' % (self.__service_name,
                                                        service))
  if request.ByteSize() > self.__max_request_size:
    raise apiproxy_errors.RequestTooLargeError(
        'The request to API call %s.%s() was too large.' % (service, call))
  messages = []
  assert request.IsInitialized(messages), messages

  if self.__error:
    raise self.__error
  else:
    method = getattr(self, '_Dynamic_' + call)
    if self._ACCEPTS_REQUEST_ID:
      method(request, response, request_id)
    else:
      method(request, response)
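MakeSyncCall dispatches by name: a call named 'Echo' resolves to the stub's _Dynamic_Echo method via getattr. The self-contained sketch below only illustrates that convention; EchoStub, EchoRequest, and EchoResponse are made-up names, not SDK classes, and the size/initialization checks are omitted.

# Hypothetical, self-contained sketch of the getattr-based dispatch above.
class EchoRequest(object):
  def __init__(self, payload):
    self.payload = payload

class EchoResponse(object):
  payload = None

class EchoStub(object):
  _ACCEPTS_REQUEST_ID = False

  def MakeSyncCall(self, service, call, request, response, request_id=None):
    method = getattr(self, '_Dynamic_' + call)   # e.g. _Dynamic_Echo
    if self._ACCEPTS_REQUEST_ID:
      method(request, response, request_id)
    else:
      method(request, response)

  def _Dynamic_Echo(self, request, response):
    response.payload = request.payload

stub = EchoStub()
resp = EchoResponse()
stub.MakeSyncCall('echo', 'Echo', EchoRequest('hello'), resp)
assert resp.payload == 'hello'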
Python
def SetError(self, error):
  """Set an error condition that is always raised when calls are made to the
  stub.

  Args:
    error: An instance of apiproxy_errors.Error or None for no error.
  """
  assert error is None or isinstance(error, apiproxy_errors.Error)
  self.__error = error
Python
def _CollectTerms(self, node):
  """Get all search terms for scoring."""
  if node.getType() in search_util.TEXT_QUERY_TYPES:
    return set([query_parser.GetQueryNodeText(node).strip('"')])
  elif node.children:
    if node.getType() is QueryParser.RESTRICTION and len(node.children) > 1:
      children = node.children[1:]
    else:
      children = node.children

    result = set()
    for term_set in (self._CollectTerms(child) for child in children):
      result.update(term_set)
    return result
  return set()
Python
def _Evaluate(self, node, score=True):
  """Retrieve scored results for a search query."""
  doc_match = document_matcher.DocumentMatcher(
      node, self._parser, self._inverted_index)

  matched_documents = doc_match.FilterDocuments(self._documents.itervalues())
  terms = self._CollectTerms(node)
  scored_documents = [
      _ScoredDocument(doc, self._ScoreDocument(doc, score, terms))
      for doc in matched_documents]
  return scored_documents
Python
def SortKey(scored_doc):
  """Return the sort key for a document based on the request parameters."""
  field = search_util.GetFieldInDocument(
      scored_doc.document, sort_spec.sort_expression())
  if not field:
    return default_value

  string_val = field.value().string_value()
  if field.value().type() in search_util.NUMBER_DOCUMENT_FIELD_TYPES:
    return float(string_val)
  if field.value().type() is document_pb.FieldValue.DATE:
    return search_util.EpochTime(search_util.DeserializeDate(string_val))
  return string_val
Python
def Search(self, search_request):
  """Searches the simple index for documents matching the request."""
  query = urllib.unquote(search_request.query())
  query = query.strip()

  if not query:
    return [_ScoredDocument(document, 0)
            for document in copy.copy(self._documents.values())]

  if not isinstance(query, unicode):
    query = unicode(query, 'utf-8')
  query_tree = query_parser.Simplify(query_parser.Parse(query))
  score = _ScoreRequested(search_request)
  docs = self._Evaluate(query_tree, score=score)
  docs = self._Sort(docs, search_request, score)
  docs = self._AttachExpressions(docs, search_request)
  return docs
Python
def _FillSearchResponse(self, results, position_range, cursor_type, score,
                        response, field_spec=None, ids_only=None):
  """Fills the SearchResponse with a selection of results."""
  for i in position_range:
    result = results[i]
    search_result = response.add_result()
    self._CopyDocument(result.document, search_result.mutable_document(),
                       field_spec, ids_only)
    if cursor_type is search_service_pb.SearchParams.PER_RESULT:
      search_result.set_cursor(result.document.id())
    if score:
      search_result.add_score(result.score)
    for field, expression in result.expressions.iteritems():
      expr = search_result.add_expression()
      expr.set_name(field)
      if (isinstance(expression, float) or
          isinstance(expression, long) or
          isinstance(expression, int)):
        expr.mutable_value().set_string_value(str(expression))
        expr.mutable_value().set_type(document_pb.FieldValue.NUMBER)
      else:
        expr.mutable_value().set_string_value(expression)
Python
def _Dynamic_Search(self, request, response):
  """A local implementation of SearchService.Search RPC.

  Args:
    request: A search_service_pb.SearchRequest.
    response: A search_service_pb.SearchResponse.
  """
  if request.has_app_id():
    self._RandomSearchResponse(request, response)
    return

  index = None
  try:
    index = self._GetIndex(request.params().index_spec())
    if index is None:
      self._UnknownIndex(response.mutable_status(),
                         request.params().index_spec())
      return
  except IndexConsistencyError, exception:
    self._InvalidRequest(response.mutable_status(), exception)
    return

  params = request.params()
  results = index.Search(params)
  response.set_matched_count(len(results))

  offset = 0
  if params.has_cursor():
    positions = [i for i in range(len(results))
                 if results[i].document.id() is params.cursor()]
    if positions:
      offset = positions[0] + 1
  elif params.has_offset():
    offset = params.offset()

  if offset < len(results):
    position_range = range(
        offset,
        min(offset + params.limit(), len(results)))
  else:
    position_range = range(0)
  field_spec = None
  if params.has_field_spec():
    field_spec = params.field_spec()
  self._FillSearchResponse(results, position_range, params.cursor_type(),
                           _ScoreRequested(params), response, field_spec,
                           params.keys_only())
  if (params.cursor_type() is search_service_pb.SearchParams.SINGLE and
      len(position_range)):
    response.set_cursor(
        results[position_range[len(position_range) - 1]].document.id())

  response.mutable_status().set_code(search_service_pb.SearchServiceError.OK)
Python
def Write(self):
  """Write search indexes to the index file.

  This method is a no-op if index_file is set to None.
  """
  if not self.__index_file:
    return

  descriptor, tmp_filename = tempfile.mkstemp(
      dir=os.path.dirname(self.__index_file))
  tmpfile = os.fdopen(descriptor, 'wb')

  pickler = pickle.Pickler(tmpfile, protocol=1)
  pickler.fast = True
  pickler.dump(self.__indexes)

  tmpfile.close()

  self.__index_file_lock.acquire()
  try:
    try:
      os.rename(tmp_filename, self.__index_file)
    except OSError:
      os.remove(self.__index_file)
      os.rename(tmp_filename, self.__index_file)
  finally:
    self.__index_file_lock.release()
Python
def login(auth_data):
    """Returns a Requests session which is logged in.
    Raises ConnectionRefusedError if login info is incorrect."""
    s = requests.Session()

    data = {
        "txtUser": auth_data["username"],
        "txtId": auth_data["id"],
        "txtPass": auth_data["password"],
        "enter": "כניסה",  # Hebrew for "enter"; the login button's value
        "javas": "1",
        "src": ""
    }

    login_result = s.post(
        "https://www.ims.tau.ac.il/Tal/Login_Chk.aspx",
        data=data)

    # Login was successful if it redirects to /Tal/Sys/Main.aspx?...
    if "://www.ims.tau.ac.il/Tal/Sys/Main.aspx" not in login_result.url:
        raise ConnectionRefusedError

    return s
Python
def login_or_cached(auth_data, cache_path=sess_file):
    """Tries to use a cached session, if it's from the last hour"""
    time = timestamp()

    try:
        with open(cache_path, "rb") as f:
            pickled_data = pickle.load(f)
            if pickled_data["username"] == auth_data["username"] and \
               pickled_data["id"] == auth_data["id"] and \
               pickled_data["password"] == auth_data["password"] and \
               pickled_data["time"] > (time - 3600):
                # From the last hour, and is of the same parameters
                session = requests.Session()
                session.cookies.update(pickled_data["cookies"])
                return session
    except FileNotFoundError:
        pass

    # Save a new session to the cache
    new_session = login(auth_data)
    with open(cache_path, "wb") as f:
        data = {
            "username": auth_data["username"],
            "id": auth_data["id"],
            "password": auth_data["password"],
            "time": time,
            "cookies": new_session.cookies
        }
        pickle.dump(data, f)

    return new_session
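A brief usage sketch for the two login helpers above; the credential values are placeholders, and the timestamp()/sess_file helpers referenced by login_or_cached are assumed to be defined elsewhere in the same module.

# Placeholder credentials for illustration only.
auth_data = {"username": "jdoe", "id": "000000000", "password": "hunter2"}

try:
    session = login_or_cached(auth_data)   # reuses a session less than an hour old
    page = session.get("https://www.ims.tau.ac.il/Tal/Sys/Main.aspx")
    print(page.status_code)
except ConnectionRefusedError:
    print("Login failed: check username/id/password.")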
Python
def buy_operation(ticker, quantity): """ send a POST request to "/v2/orders" to create a new order :param ticker: stock ticker :param quantity: quantity to buy :return: confirmation that the order to buy has been opened """ url = BASE_URL + "/v2/orders" payload = json.dumps({ "symbol": ticker, "qty": quantity, "side": "buy", "type": "market", "time_in_force": "day" }) headers = { 'APCA-API-KEY-ID': API_KEY, 'APCA-API-SECRET-KEY': SECRET_KEY, 'Content-Type': 'application/json' } return requests.request("POST", url, headers=headers, data=payload).json()
Python
def close_position(ticker): """ sends a DELETE request to "/v2/positions/" to liquidate an open position :param ticker: stock ticker :return: confirmation that the position has been closed """ url = BASE_URL + "/v2/positions/" + ticker headers = { 'APCA-API-KEY-ID': API_KEY, 'APCA-API-SECRET-KEY': SECRET_KEY } return requests.request("DELETE", url, headers=headers).json()
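A hedged usage sketch for the two Alpaca helpers above. The module-level constants are assumptions about how the surrounding file is configured (paper-trading endpoint, placeholder keys); they are not part of the original snippet.

# Assumed module-level configuration for the two helpers above (values are placeholders).
import json
import requests

BASE_URL = "https://paper-api.alpaca.markets"  # paper-trading endpoint (assumption)
API_KEY = "YOUR_KEY_ID"                        # placeholder
SECRET_KEY = "YOUR_SECRET_KEY"                 # placeholder

# Example calls: open a small market order, then liquidate the position.
order = buy_operation("AAPL", 1)
print(order.get("id"), order.get("status"))
closed = close_position("AAPL")
print(closed)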
Python
def openmailbox(inmailboxpath, outmailboxpath):
    """
    Open a mailbox (maildir) at the given path and cycle over all the emails it contains.
    """
    # If Factory = mailbox.MaildirMessage or rfc822.Message any update moves the email in /new from /cur
    # see > http://stackoverflow.com/a/13445253/1435167
    mbox = mailbox.Maildir(inmailboxpath, factory=None)
    # iterate all the emails in the hierarchy
    for key, msg in mbox.iteritems():
        # ToDo Skip messages without 'attachment' without parsing parts, but what are attachments?
        # I retain text/plain and text/html.
        # if 'alternative' in msg.get_content_type():
        # if msg.is_multipart():
        print 'Key : ', key
        print 'Subject : ', msg.get('Subject')
        if options.verbose:
            print 'Multip. : ', msg.is_multipart()
            print 'Content-Type : ', msg.get('Content-Type')
        print 'Parts : '
        detach(msg, key, outmailboxpath, mbox)
        print '='*20
Python
def detach(msg, key, outmailboxpath, mbox):
    """
    Cycle over all the parts of the message, detach every part whose content type
    is neither text nor multipart to outmailboxpath, then delete the part's headers
    and rewrite it as an inline text log message.
    """
    print '-----'
    for part in msg.walk():
        content_maintype = part.get_content_maintype()
        if (content_maintype != 'text') and (content_maintype != 'multipart'):
            filename = part.get_filename()
            if options.verbose:
                print ' Content-Disposition : ', part.get('Content-Disposition')
                print ' maintype : ', part.get_content_maintype()
            print ' %s : %s' % (part.get_content_type(), filename)
            outpath = outmailboxpath+key+'/'
            if options.save_attach:
                try:
                    os.makedirs(outpath)
                except OSError:
                    if not os.path.isdir(outpath):
                        raise
                if filename is None:
                    import tempfile
                    fp = tempfile.NamedTemporaryFile(dir=outpath, delete=False)
                    filename = os.path.basename(fp.name)
                    print("Will save in {}".format(fp.name))
                else:
                    fp = open(outpath+filename, 'wb')
                fp.write(part.get_payload(decode=1) or "")
                fp.close()
            outmessage = ' ATTACHMENT=%s\n moved to\n OUTPATH=%s' % (filename, outpath[len(OUTPATH):]+filename)
            if options.del_attach:
                # rewrite headers and delete the attachment in the payload
                tmp = [part.__delitem__(h) for h in part.keys()]
                part.set_payload(outmessage)
                part.set_param('Content-Type', 'text/html; charset=ISO-8859-1')
                part.set_param('Content-Disposition', 'inline')
                mbox.__setitem__(key, msg)
            print outmessage
    print '-----'
Python
def update_vectors(self): """ Update vecs and distances with the same indices """ if np.max(np.absolute(self.vecs)) > 0.49*np.min(np.linalg.norm(self._ref_structure.cell, axis=-1)): raise AssertionError('Largest distance value is larger than half the box -> rerun get_neighbors') myself = np.ones_like(self.indices)*np.arange(len(self.indices))[:, np.newaxis] vecs = self._ref_structure.get_distances( myself.flatten(), self.indices.flatten(), mic=np.all(self._ref_structure.pbc), vector=True ) self.vecs = vecs.reshape(self.vecs.shape) self.distances = np.linalg.norm(self.vecs, axis=-1)
Python
def cluster_by_vecs(self, bandwidth=None, n_jobs=None, max_iter=300): """ Method to group vectors which have similar values. This method should be used as a part of neigh.get_global_shells(cluster_by_vecs=True) or neigh.get_local_shells(cluster_by_vecs=True). However, in order to specify certain arguments (such as n_jobs or max_iter), it might help to have run this function before calling parent functions, as the data obtained with this function will be stored in the variable `_cluster_vecs` Args: bandwidth (float): Resolution (cf. sklearn.cluster.MeanShift) n_jobs (int): Number of cores (cf. sklearn.cluster.MeanShift) max_iter (int): Number of maximum iterations (cf. sklearn.cluster.MeanShift) """ if bandwidth is None: bandwidth = 0.2*np.min(self.distances) dr = self.vecs.copy().reshape(-1, 3) self._cluster_vecs = MeanShift(bandwidth=bandwidth, n_jobs=n_jobs, max_iter=max_iter).fit(dr) self._cluster_vecs.labels_ = self._cluster_vecs.labels_.reshape(self.indices.shape)
Python
def from_hdf(self, hdf, group_name="structure"):
    """
    Retrieve the object from a HDF5 file

    Args:
        hdf (pyiron_base.generic.hdfio.FileHDFio): HDF path from which the object is to be read
        group_name (str): Group name from which the Atoms object is retrieved.

    Returns:
        pyiron_atomistic.structure.atoms.Atoms: The retrieved atoms class

    """
    if "indices" in hdf[group_name].list_nodes():
        with hdf.open(group_name) as hdf_atoms:
            if "new_species" in hdf_atoms.list_groups():
                with hdf_atoms.open("new_species") as hdf_species:
                    self._pse.from_hdf(hdf_species)

            el_object_list = [
                self.convert_element(el, self._pse) for el in hdf_atoms["species"]
            ]
            self.indices = hdf_atoms["indices"]
            self._tag_list._length = len(self.indices)

            self.set_species(el_object_list)
            self.bonds = None

            tr_dict = {1: True, 0: False}
            self.dimension = hdf_atoms["dimension"]
            self.units = hdf_atoms["units"]

            if "cell" in hdf_atoms.list_groups():
                with hdf_atoms.open("cell") as hdf_cell:
                    self.cell = hdf_cell["cell"]
                    self.pbc = hdf_cell["pbc"]

            # Backward compatibility
            position_tag = "positions"
            if position_tag not in hdf_atoms.list_nodes():
                position_tag = "coordinates"
            if "is_absolute" in hdf_atoms.list_nodes():
                if not tr_dict[hdf_atoms["is_absolute"]]:
                    self.set_scaled_positions(hdf_atoms[position_tag])
                else:
                    self.arrays['positions'] = hdf_atoms[position_tag]
            else:
                self.arrays['positions'] = hdf_atoms[position_tag]

            self.arrays['numbers'] = self.get_atomic_numbers()

            if "explicit_bonds" in hdf_atoms.list_nodes():
                # print "bonds: "
                self.bonds = hdf_atoms["explicit_bonds"]

            if "tags" in hdf_atoms.list_groups():
                with hdf_atoms.open("tags") as hdf_tags:
                    tags = hdf_tags.list_nodes()
                    for tag in tags:
                        # tr_dict = {'0': False, '1': True}
                        if isinstance(hdf_tags[tag], (list, np.ndarray)):
                            my_list = hdf_tags[tag]
                            self._tag_list[tag] = SparseList(
                                my_list, length=len(self)
                            )
                        else:
                            my_dict = hdf_tags.get_pandas(tag).to_dict()
                            my_dict = {
                                i: val
                                for i, val in zip(
                                    my_dict["index"], my_dict["values"]
                                )
                            }
                            self._tag_list[tag] = SparseList(
                                my_dict, length=len(self)
                            )

            if "bonds" in hdf_atoms.list_nodes():
                self.bonds = hdf_atoms["explicit_bonds"]

            self._high_symmetry_points = None
            if "high_symmetry_points" in hdf_atoms.list_nodes():
                self._high_symmetry_points = hdf_atoms["high_symmetry_points"]

            self._high_symmetry_path = None
            if "high_symmetry_path" in hdf_atoms.list_nodes():
                self._high_symmetry_path = hdf_atoms["high_symmetry_path"]

            if "info" in hdf_atoms.list_nodes():
                self.info = hdf_atoms["info"]

            return self
    else:
        return self._from_hdf_old(hdf, group_name)
Python
def analyse_pyscal_voronoi_volume(self):
    """
    Calculate the Voronoi volume of the atoms in this structure.

    The structure itself (self) is passed to
    pyiron.atomistics.structure.pyscal.analyse_voronoi_volume; the method takes no arguments.
    """
    from pyiron.atomistics.structure.pyscal import analyse_voronoi_volume

    return analyse_voronoi_volume(atoms=self)
Python
def default(outname, *args, **kwargs): ''' Utility function writing a default YAML setting file if none is found. Parameters ---------- outname : str name of the output YAML file ''' configuration = {'font' : 'fixed,30,-1,5,75,0,0,0,0,0', 'color' : '#ffdd1c', 'x' : 0, 'y' : 0, 'opacity' : 1, 'blinkPeriod' : '00:00:01', 'blinkFreq' : 100, 'blinkNb' : 3 } writeConfiguration(outname, configuration) return
Python
def writeConfiguration(outname, configuration, *args, **kwargs):
    '''
    Utility function to write the YAML configuration file with the given parameters.

    Parameters
    ----------
        configuration : dict
            dictionary to be converted into a YAML file
        outname : str
            name of the output YAML file
    '''

    output = dump(configuration, Dumper=Dumper)
    with open(outname, 'w') as f:
        f.write(output)
    return
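A small usage sketch for the helper above; it assumes the usual PyYAML imports these functions rely on (e.g. `from yaml import dump, Dumper`), and the values are only illustrative.

# Sketch: persist a tweaked configuration (assumes dump/Dumper come from PyYAML).
custom = {'font': 'fixed,30,-1,5,75,0,0,0,0,0',
          'color': '#ffdd1c',
          'x': 100, 'y': 50,
          'opacity': 0.8,
          'blinkPeriod': '00:00:30',
          'blinkFreq': 200,
          'blinkNb': 5}
writeConfiguration('settings.yaml', custom)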
Python
def init(scriptDir, *args, **kwargs):
    '''
    Initialise code parameters at startup.

    Parameters
    ----------
        scriptDir : str
            location of the code and the yaml file(s)

    Return the settings dictionary and an error code (0 if ok, -1 if error).
    '''

    file = opath.join(scriptDir, 'settings.yaml')

    # If the file does not exist, a default one is created
    if not opath.isfile(file):
        default(file)

    # Load configuration option from setting file
    with open(file, 'r') as f:
        settings = load(f, Loader=Loader)

    # If a key is missing, the setting file is saved and a new one is created with default value
    errCode = 0
    for i in ['font', 'color', 'x', 'y', 'opacity', 'blinkPeriod', 'blinkFreq', 'blinkNb']:
        if i not in settings.keys():
            print('Error in setting file %s. The key %s was missing. Generating a new default configuration file instead.' %(file, i))

            # Save copy
            path, newname = opath.split(file)
            newname = opath.join(path, r'~%s' %newname)
            os.rename(file, newname)

            # Generate and load new default settings
            default(file)
            with open(file, 'r') as f:
                settings = load(f, Loader=Loader)
            errCode = -1

    ################################################
    #           Check parameters                   #
    ################################################

    # X coordinate
    if not isinstance(settings['x'], int) or settings['x'] < 0:
        print('Given x coordinate is < 0 or is not an int. Using 0 as default value instead.')
        settings['x'] = 0
    else:
        settings['x'] = int(settings['x'])

    # Y coordinate
    if not isinstance(settings['y'], int) or settings['y'] < 0:
        print('Given y coordinate is < 0 or is not an int. Using 0 as default value instead.')
        settings['y'] = 0
    else:
        settings['y'] = int(settings['y'])

    # Opacity
    if not isinstance(settings['opacity'], (int, float)) or settings['opacity'] < 0 or settings['opacity'] > 1:
        print('Given opacity is not in the range [0, 1] or is not an int/float. Using 1 as default value instead.')
        settings['opacity'] = 1

    # Period is changed from a string to a PyQt QTime object
    period = QTime().fromString(settings['blinkPeriod'])
    if period.isNull():
        print('Given blinking period could not be converted to a valid PyQt QTime object. Using 1s as default value instead.')
        settings['blinkPeriod'] = QTime(0, 0, 1)
    else:
        settings['blinkPeriod'] = period

    # Blinking frequency
    if not isinstance(settings['blinkFreq'], (int, float)):
        print('Given blinking frequency is not an int/float. Using 100ms as default value instead.')
        settings['blinkFreq'] = 100
    else:
        if settings['blinkFreq'] < 50:
            print('Given blinking frequency is below minimum value. Clipping to 50ms as default value instead.')
            settings['blinkFreq'] = 50
        elif settings['blinkFreq'] > 10000:
            print('Given blinking frequency is above maximum value. Clipping to 10s as default value instead.')
            settings['blinkFreq'] = 10000
        else:
            settings['blinkFreq'] = int(settings['blinkFreq'])

    # Blinking number
    if not isinstance(settings['blinkNb'], int) or settings['blinkNb'] <= 0:
        print('Given blinking number is <= 0 or is not an int. Using 3 as default value instead.')
        settings['blinkNb'] = 3

    return settings, errCode
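A hypothetical startup sketch showing how `init` might be called from the application entry point; it relies on the same module-level imports the function itself assumes (`os.path as opath`, PyQt's `QTime`, PyYAML's `load`/`Loader`).

# Hypothetical startup sketch for init(); not part of the original module.
if __name__ == '__main__':
    scriptDir = opath.dirname(opath.abspath(__file__))
    settings, errCode = init(scriptDir)
    if errCode != 0:
        print('settings.yaml was regenerated with default values.')
    # settings['blinkPeriod'] is now a QTime; the other entries are plain Python values.
    print(settings['font'], settings['opacity'], settings['blinkFreq'])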
Python
def blinkWindow(self, *args, **kwargs): ''' Creates a window to setup blinking. If blinking is activated, calling this function deactivates it. ''' if self.blinkActive: # Reset to default values and show back the clock self.blinkActive = False # Stop timers self.smalltimer.stop() self.blinktimer.stop() # Resume previous opacity self.opacity = self._opacity self.setLabelOpacity(self.opacity) else: blinkDialog = BlinkWindow(self) blinkDialog.show() return
Python
def blink_text(self, *args, **kwargs):
    '''Function used to make the text blink a given number of times.'''

    self.cnt += 1
    self.setLabelOpacity(1-self.opacity)

    # Stop the small timer once the text has blinked blinkNb times
    if self.cnt == self.blinkNb:
        self.setLabelOpacity(0)
        self.smalltimer.stop()
    return
Python
def run_blink(self, *args, **kwargs):
    '''Function called every time the text must be blinked.'''

    # Counter used to know when to stop the flickering
    self.cnt = 0
    # Use the configured blink frequency (in ms) rather than a hardcoded value
    self.smalltimer.start(self.blinkFreq)
    return
Python
def start_blink(self, blinkfreq, period, nb, *args, **kwargs):
    '''
    Starts blinking of the clock.

    :param float blinkfreq: number of ms between blinks
    :param QTimeEdit period: time between two blink phases
    :param int nb: number of blinks per blink phase
    '''

    if not isinstance(blinkfreq, (int, float)):
        raise TypeError('Blinking frequency must be an int or a float but is given as a %s' %type(blinkfreq))
    else:
        blinkfreq = int(blinkfreq)

    if blinkfreq <= 0:
        raise ValueError('Blinking frequency must be positive only (current value is %f)' %blinkfreq)

    if not isinstance(nb, (int, float)):
        raise TypeError('Number of blinks must be an int or a float but is given as a %s' %type(nb))
    else:
        nb = int(nb)

    if nb <= 0:
        raise ValueError('Number of blinks must be positive only (current value is %d)' %nb)

    # Store values in case the user saves the current configuration later on
    self.blinkActive = True
    self.blinkNb = nb
    self.blinkFreq = blinkfreq # in ms
    self.blinkPeriod = period

    # Period between blinking phases in ms
    period_ms = sum([int(i)*60**pos for pos, i in enumerate(self.blinkPeriod.toString().split(':')[::-1])]) * 1000

    # Save opacity for when we go back to normal
    self._opacity = self.opacity
    self.setLabelOpacity(0)
    self.blinktimer.start(period_ms)
    return
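A worked example of the `period_ms` conversion used above, as a standalone sketch: the "hh:mm:ss" string is split, reversed, and weighted by powers of 60 before converting to milliseconds.

# Standalone worked example of the period conversion in start_blink().
parts = '00:01:30'.split(':')[::-1]        # ['30', '01', '00'] -> seconds, minutes, hours
seconds = sum(int(i) * 60**pos for pos, i in enumerate(parts))  # 30 + 1*60 + 0*3600 = 90
period_ms = seconds * 1000                 # 90000 ms between blink phases
print(period_ms)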
Python
def changeColor(self, *args, **kwargs): '''Ask for a text color and change it.''' color = QColorDialog.getColor() if color.isValid(): self.color = color self.label.setStyleSheet('QLabel { color: %s }' %self.color.name()) return
Python
def changeFont(self, *args, **kwargs): '''Ask for a text style and change it.''' font, ok = QFontDialog.getFont() if ok: self.font = font self.updateFont() return
Python
def keyPressEvent(self, e, *args, **kwargs):
    '''Actions taken when a key is pressed.'''

    # Deal with shift key being pressed first
    if e.modifiers() & Qt.ShiftModifier:
        if e.key() == Qt.Key_Up:
            self.setLabelOpacity(self.opacity+0.05)
        elif e.key() == Qt.Key_Down:
            self.setLabelOpacity(self.opacity-0.05)
    else:
        if e.key() == Qt.Key_Down:
            newSize = self.font.pointSize()-1
            if newSize < 1:
                newSize = 1
            self.font.setPointSize(newSize)
        elif e.key() == Qt.Key_Up:
            self.font.setPointSize(self.font.pointSize()+1)
        else:
            return

        self.updateFont()
    return
Python
def updateFont(self, *args, **kwargs): '''Update the label font with the value given in self.font and update the window size accordingly.''' self.label.setFont(self.font) width = self.label.fontMetrics().width(self.label.text())+2 height = self.label.fontMetrics().height()+2 self.setFixedSize(width, height) return
Python
def showTime(self, *args, **kwargs): '''Update the time label when value has changed.''' time = QDateTime.currentDateTime() timeStr = time.toString('hh:mm') if timeStr != self.label.text(): self.label.setText(timeStr) return
Python
def cancel(self, *args, **kwargs):
    '''When cancel is pressed, blinking parameters are updated in case the user wants to save later on.'''

    self.parent.blinkNb = self.blnbedit.value()
    self.parent.blinkPeriod = self.tedit.time()
    self.parent.blinkFreq = self.lenedit.value()
    self.close()
    return
Python
def keyPressEvent(self, e, *args, **kwargs): '''Actions taken when a key is pressed.''' if e.key() == Qt.Key_Escape: self.cancel(*args, **kwargs) elif e.key() == Qt.Key_Return: self.ok(*args, **kwargs) return
Python
def ok(self, *args, **kwargs): '''Start blinking when ok is pressed.''' self.parent.start_blink(self.lenedit.value(), self.tedit.time(), self.blnbedit.value()) self.close() return
Python
def wikimedia_request(page_name, start_date, end_date = None):
    '''
    A function that makes requests to the Wikimedia pageviews API

    Parameters
    ----------
    page_name : string
        A string containing the name of the Wikipedia page you would like pageviews for

    start_date : string
        a date string YYYY/MM/DD indicating the first date that the request should return

    end_date : string
        a date string YYYY/MM/DD indicating the last date that the request should return. defaults to system date

    Returns
    -------
    df : pandas DataFrame
        A dataframe with the article name and the number of pageviews.
    '''

    # get rid of the / in the date
    sdate = start_date.split("/")
    # join together the text that was split
    sdate = ''.join(sdate)
    # if an end date is not specified
    if end_date == None:
        # get the current date
        end_date = str(datetime.datetime.now())[0:10].split("-")
        edate = ''.join(end_date)
    else:
        # use date from end_date argument
        edate = end_date.split("/")
        edate = edate[0] + edate[1] + edate[2]

    # use these elements to make an api request
    r = requests.get(
        "https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/en.wikipedia.org/all-access/all-agents/{}/daily/{}/{}".format(page_name, sdate, edate)
    )
    # get the json
    result = r.json()
    # convert to dataframe
    df = pd.DataFrame(result['items'])
    # the wikimedia api returns 2 extra zeros at the end of the timestamp for some reason
    df['timestamp'] = [i[:-2] for i in df.timestamp]
    # convert to datetime
    df['timestamp'] = pd.to_datetime(df['timestamp'])
    # set timestamp as index
    df.set_index('timestamp', inplace = True)
    # return the article and views columns
    return df[['article', 'views']]
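A short usage sketch for the helper above; the page title and date range are only illustrative, and the call obviously requires network access.

# Example call (requires network access); page title is only illustrative.
df = wikimedia_request("Python_(programming_language)", "2023/01/01", "2023/03/31")
print(df.head())
print(df['views'].describe())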
Python
def tsregplot(series, ax = None, days_forward = 10, color = 'C0'):
    '''
    A function that plots a datetime-indexed series with a seaborn regression (trend) line

    Parameters
    ----------
    series : Pandas datetime index Series
        A pandas Series with datetime index

    ax : matplotlib axes object
        A matplotlib axes object

    days_forward : int
        An integer indicating how many days to extend the regression line

    color : string
        A matplotlib color string

    Returns
    -------
    ax : matplotlib axes object
        returns a matplotlib axes object with regplot
    '''

    series = series.reset_index()
    series.columns = ['date', 'value']
    if ax == None:
        series['date_ordinal'] = pd.to_datetime(series['date']).apply(lambda date: date.toordinal())
        ax = sns.regplot(
            data=series,
            x='date_ordinal',
            y='value',
            color = color
        )
        ax.set_xlim(series['date_ordinal'].min() - 2, series['date_ordinal'].max() + days_forward)
        ax.set_ylim(series['value'].min() *0.9, series['value'].max() * 1.1)
        ax.set_xlabel('date')
        new_labels = [date.fromordinal(int(item)) for item in ax.get_xticks()]
        ax.set_xticklabels(new_labels)
    else:
        series['date_ordinal'] = pd.to_datetime(series['date']).apply(lambda date: date.toordinal())
        ax = sns.regplot(
            data=series,
            x='date_ordinal',
            y='value',
            ax = ax,
            color = color
        )
        ax.set_xlim(series['date_ordinal'].min() - 5, series['date_ordinal'].max() + days_forward)
        ax.set_ylim(series['value'].min() * 0.9 , series['value'].max()* 1.1)
        ax.set_xlabel('date')
        new_labels = [date.fromordinal(int(item)).strftime("%m/%Y") for item in ax.get_xticks()]
        ax.set_xticklabels(new_labels)
    return ax
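A sketch of how `tsregplot` might be combined with `wikimedia_request`; it assumes the same module-level imports the function itself relies on (pandas, seaborn, `datetime.date`) plus matplotlib, and the page title is again only an example.

# Sketch: plot daily pageviews with a fitted trend line (requires network access).
import matplotlib.pyplot as plt

views = wikimedia_request("Pandas_(software)", "2023/01/01")['views']
fig, ax = plt.subplots(figsize=(10, 4))
tsregplot(views, ax=ax, days_forward=30, color='C1')
ax.set_title('Daily pageviews with linear trend')
plt.tight_layout()
plt.show()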
Python
def listen_print_loop(responses, stream):
    """Iterates through server responses and prints them.

    The responses passed is a generator that will block until a response
    is provided by the server.

    Each response may contain multiple results, and each result may contain
    multiple alternatives; for details, see https://goo.gl/tjCPAU.  Here we
    print only the transcription for the top alternative of the top result.

    In this case, responses are provided for interim results as well. If the
    response is an interim one, print a line feed at the end of it, to allow
    the next result to overwrite it, until the response is a final one. For the
    final one, print a newline to preserve the finalized transcription.
    """
    for response in responses:
        if get_current_time() - stream.start_time > STREAMING_LIMIT:
            stream.start_time = get_current_time()
            break

        if not response.results:
            continue

        result = response.results[0]

        if not result.alternatives:
            continue

        transcript = result.alternatives[0].transcript

        result_seconds = 0
        result_micros = 0

        if result.result_end_time.seconds:
            result_seconds = result.result_end_time.seconds

        if result.result_end_time.microseconds:
            result_micros = result.result_end_time.microseconds

        stream.result_end_time = int((result_seconds * 1000) + (result_micros / 1000))

        corrected_time = (
            stream.result_end_time
            - stream.bridging_offset
            + (STREAMING_LIMIT * stream.restart_counter)
        )
        # Display interim results, but with a carriage return at the end of the
        # line, so subsequent lines will overwrite them.

        # Transcript is final
        if result.is_final:
            # Write to standard output
            sys.stdout.write('\r' + transcript + '\n')
            # Append to the output file
            with open(FILE_NAME, mode='a') as f:
                f.write(transcript + '\n')

            # Update the stream state
            stream.is_final_end_time = stream.result_end_time
            stream.last_transcript_was_final = True

        # Interim transcript (still being spoken)
        else:
            # Write to standard output
            sys.stdout.write(transcript + '\r')

            # Update the stream state
            stream.last_transcript_was_final = False
Python
def main():
    """start bidirectional streaming from microphone input to speech API"""
    client = speech.SpeechClient()
    config = speech.RecognitionConfig(
        encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=SAMPLE_RATE,
        language_code="ja-JP",  # BCP-47 code for Japanese
        max_alternatives=1,
    )

    streaming_config = speech.StreamingRecognitionConfig(
        config=config, interim_results=True
    )

    '''
    print(mic_manager.chunk_size)
    sys.stdout.write(YELLOW)
    sys.stdout.write('\nListening, say "Quit" or "Exit" to stop.\n\n')
    sys.stdout.write("End (ms) Transcript Results/Status\n")
    sys.stdout.write("=====================================================\n")
    '''

    with mic_manager as stream:

        while not stream.closed:
            sys.stdout.write(YELLOW)
            sys.stdout.write(
                "\n" + str(STREAMING_LIMIT * stream.restart_counter) + ": NEW REQUEST\n"
            )

            stream.audio_input = []
            audio_generator = stream.generator()

            requests = (
                speech.StreamingRecognizeRequest(audio_content=content)
                for content in audio_generator
            )

            responses = client.streaming_recognize(streaming_config, requests)

            # Now, put the transcription responses to use.
            listen_print_loop(responses, stream)

            if stream.result_end_time > 0:
                stream.final_request_end_time = stream.is_final_end_time
            stream.result_end_time = 0
            stream.last_audio_input = []
            stream.last_audio_input = stream.audio_input
            stream.audio_input = []
            stream.restart_counter = stream.restart_counter + 1

            if not stream.last_transcript_was_final:
                sys.stdout.write("\n")
            stream.new_stream = True
Python
def new_user(): """ Creates a test user 1. """ user = User(email='[email protected]', username='TESTING123') user.set_password('TESTING') return user
Python
def new_user1(): """ Creates a test user 2. """ user1 = User(email='[email protected]', username='TESTING321') user1.set_password('TESTING') return user1
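A hedged sketch of a test that might consume the helpers above, assuming they are registered as pytest fixtures elsewhere and that the User model pairs `set_password` with a `check_password` method (both assumptions, not confirmed by the snippet).

# Hypothetical test sketch; assumes new_user is exposed as a pytest fixture and that
# User provides check_password() to complement set_password() (assumption).
def test_new_user_credentials(new_user):
    assert new_user.username == 'TESTING123'
    assert new_user.check_password('TESTING')
    assert not new_user.check_password('WRONG')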
Python
def init_job(job): """Initialize individual job workspace, including mdp and molecular init files.""" sys.path.append(Project().root_directory() + "/..") from reproducibility_project.src.engine_input.gromacs import mdp from reproducibility_project.src.molecules.system_builder import ( construct_system, ) # Create a Compound and save to gro and top files system = construct_system(job.sp) system[0].save(filename="init.gro", overwrite=True) ff = load_ff(job.sp.forcefield_name) param_system = ff.apply(system[0]) param_system.save( "init.top", overwrite=True, ) # Modify mdp files according to job statepoint parameters cutoff_styles = {"hard": "Cut-off"} pressure = job.sp.pressure * u.kPa mdp_abs_path = os.path.dirname(os.path.abspath(mdp.__file__)) mdps = { "em": { "fname": "em.mdp", "template": f"{mdp_abs_path}/em_template.mdp.jinja", "data": { "r_cut": job.sp.r_cut, "cutoff_style": cutoff_styles[job.sp.cutoff_style], "temp": job.sp.temperature, "replica": job.sp.replica, }, }, "nvt": { "fname": "nvt.mdp", "template": f"{mdp_abs_path}/nvt_template.mdp.jinja", "data": { "temp": job.sp.temperature, "r_cut": job.sp.r_cut, "cutoff_style": cutoff_styles[job.sp.cutoff_style], }, }, "npt": { "fname": "npt.mdp", "template": f"{mdp_abs_path}/npt_template.mdp.jinja", "data": { "temp": job.sp.temperature, "refp": pressure.to_value("bar"), "r_cut": job.sp.r_cut, "cutoff_style": cutoff_styles[job.sp.cutoff_style], }, }, } for op, mdp in mdps.items(): _setup_mdp( fname=mdp["fname"], template=mdp["template"], data=mdp["data"], overwrite=True, )
Python
def grompp_em(job): """Run GROMACS grompp for the energy minimization step.""" em_mdp_path = "em.mdp" msg = f"gmx grompp -f {em_mdp_path} -o em.tpr -c init.gro -p init.top --maxwarn 1" return msg
Python
def grompp_nvt(job): """Run GROMACS grompp for the nvt step.""" nvt_mdp_path = "nvt.mdp" msg = f"gmx grompp -f {nvt_mdp_path} -o nvt.tpr -c em.gro -p init.top --maxwarn 1" return msg
Python
def grompp_npt(job): """Run GROMACS grompp for the npt step.""" npt_mdp_path = "npt.mdp" msg = f"gmx grompp -f {npt_mdp_path} -o npt.tpr -c em.gro -p init.top --maxwarn 1" return msg
Python
def _setup_mdp(fname, template, data, overwrite=False):
    """Create mdp files based on a template and provided data.

    Parameters
    ----------
    fname: str
        Name of the file to be saved out
    template: str, or jinja2.Template
        Either a jinja2.Template or path to a jinja template
    data: dict
        Dictionary storing data matched with the fields available in the template
    overwrite: bool, optional, default=False
        Options to overwrite (or not) an existing mdp file of the same name

    Returns
    -------
    File saved with names defined by fname
    """
    from jinja2 import Template

    if isinstance(template, str):
        with open(template, "r") as f:
            template = Template(f.read())

    if not overwrite:
        if os.path.isfile(fname):
            raise FileExistsError(
                f"{fname} already exists. Set overwrite=True to write out."
            )

    rendered = template.render(data)
    with open(fname, "w") as f:
        f.write(rendered)

    return None
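A minimal, self-contained sketch of the template/data contract `_setup_mdp` expects; the field names are made up for illustration and are not the ones used by the real em/nvt/npt templates.

# Minimal illustration of _setup_mdp's template/data contract (field names are made up).
from jinja2 import Template

demo_template = Template(
    "ref_t = {{ temp }}\n"
    "rvdw  = {{ r_cut }}\n"
)
_setup_mdp(
    fname="demo.mdp",
    template=demo_template,
    data={"temp": 300, "r_cut": 1.2},
    overwrite=True,
)
# demo.mdp now contains:
#   ref_t = 300
#   rvdw  = 1.2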
Python
def run_hoomd(job): """Run a simulation with HOOMD-blue.""" import foyer import hoomd import hoomd.md import unyt as u from mbuild.formats.gsdwriter import write_gsd from mbuild.formats.hoomd_forcefield import create_hoomd_forcefield from reproducibility_project.src.molecules.system_builder import ( construct_system, ) # temporary hack until benzene and ethanol are added try: # Ignore the vapor box # Initialize with box expanded by factor of 5 # We will shrink it later filled_box, _ = construct_system(job.sp, scale=5) except AttributeError: return ff = load_ff(job.sp.forcefield_name) structure = ff.apply(filled_box) # ref_distance: 10 angstrom -> 1 nm # ref_energy: 1/4.184 kcal/mol -> 1 kJ/mol # ref_mass: 0.9999938574 dalton -> 1 amu d = 10 e = 1 / 4.184 m = 0.9999938574 write_gsd( structure, job.fn("init.gsd"), ref_distance=d, ref_energy=e, ref_mass=m ) snapshot, forcefield, ref_vals = create_hoomd_forcefield( structure, ref_distance=d, ref_energy=e, ref_mass=m ) device = hoomd.device.auto_select() sim = hoomd.Simulation(device=device, seed=job.sp.replica) sim.create_state_from_snapshot(snapshot) gsd_writer = hoomd.write.GSD( filename=job.fn("trajectory.gsd"), trigger=hoomd.trigger.Periodic(10000), mode="ab", ) sim.operations.writers.append(gsd_writer) logger = hoomd.logging.Logger(categories=["scalar"]) logger.add(sim, quantities=["timestep", "tps"]) thermo_props = hoomd.md.compute.ThermodynamicQuantities( filter=hoomd.filter.All() ) sim.operations.computes.append(thermo_props) logger.add( thermo_props, quantities=[ "kinetic_energy", "potential_energy", "pressure", "kinetic_temperature", "volume", ], ) file = open(job.fn("log.txt"), mode="a", newline="\n") table_file = hoomd.write.Table( output=file, trigger=hoomd.trigger.Periodic(period=5000), logger=logger, max_header_len=7, ) sim.operations.writers.append(table_file) integrator = hoomd.md.Integrator(dt=0.005) integrator.forces = forcefield # convert temp in K to kJ/mol kT = (job.sp.temperature * u.K).to_equivalent("kJ/mol", "thermal").value nvt = hoomd.md.methods.NVT(filter=hoomd.filter.All(), kT=kT, tau=1.0) integrator.methods = [nvt] sim.operations.integrator = integrator sim.state.thermalize_particle_momenta(filter=hoomd.filter.All(), kT=kT) # Shrink step follows this example # https://hoomd-blue.readthedocs.io/en/latest/tutorial/ # 01-Introducing-Molecular-Dynamics/03-Compressing-the-System.html ramp = hoomd.variant.Ramp(A=0, B=1, t_start=sim.timestep, t_ramp=int(2e4)) initial_box = sim.state.box L = job.sp.box_L_liq final_box = hoomd.Box(Lx=L, Ly=L, Lz=L) box_resize_trigger = hoomd.trigger.Periodic(10) box_resize = hoomd.update.BoxResize( box1=initial_box, box2=final_box, variant=ramp, trigger=box_resize_trigger, ) sim.operations.updaters.append(box_resize) sim.run(2e4 + 1) assert sim.state.box == final_box sim.operations.updaters.remove(box_resize) sim.run(1e6) job.doc.finished = True
Python
def is_equilibrated(
    a_t: npt.ArrayLike, threshold: float = 0.8, nskip: int = 1
) -> List:
    """Check if a dataset is equilibrated based on a fraction of equil data.

    Using the `pymbar.timeseries` module, check if a timeseries dataset has
    enough equilibrated data based on a threshold value. The threshold value
    translates to the fraction of total data from the dataset 'a_t' that can
    be thought of as being in the 'production' region.

    The `pymbar.timeseries` module returns the starting index of the
    'production' region from 'a_t'. The fraction of 'production' data is then
    compared to the threshold value. If the fraction of 'production' data is
    >= the threshold fraction, this returns a list of [True, t0, g], and
    [False, t0, g] otherwise.

    Parameters
    ----------
    a_t : numpy.typing.ArrayLike
        1-D time dependent data to check for equilibration.
    threshold : float, optional, default=0.8
        Fraction of data expected to be equilibrated.
    nskip : int, optional, default=1
        Since the statistical inefficiency is computed for every time origin
        in a call to timeseries.detectEquilibration, for larger datasets
        (> few hundred), increasing nskip might speed this up, while
        discarding more data.
    """
    if threshold < 0.0 or threshold > 1.0:
        raise ValueError(
            f"Passed 'threshold' value: {threshold}, expected value between 0.0-1.0."
        )

    [t0, g, _] = timeseries.detectEquilibration(a_t, nskip=nskip)
    frac_equilibrated = 1.0 - (t0 / np.shape(a_t)[0])

    if frac_equilibrated >= threshold:
        return [True, t0, g]
    else:
        return [False, t0, g]
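A minimal usage sketch for the check above, assuming numpy, pymbar's timeseries module, and is_equilibrated itself are importable in the same namespace; the synthetic series is purely illustrative:

import numpy as np

# Synthetic signal: a short relaxation transient followed by stationary noise.
rng = np.random.default_rng(0)
relaxation = np.exp(-np.arange(200) / 20.0)               # decaying transient
noise = rng.normal(loc=0.0, scale=0.05, size=800)         # equilibrated noise
signal = np.concatenate([relaxation, noise])

equilibrated, t0, g = is_equilibrated(signal, threshold=0.8, nskip=10)
if equilibrated:
    print(f"Equilibrated from frame {t0}; statistical inefficiency g = {g:.2f}")
else:
    print(f"Less than 80% of the data lies in the production region (t0 = {t0})")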
Python
def trim_non_equilibrated(
    a_t: npt.ArrayLike, threshold: float = 0.75, nskip: int = 1
) -> List:
    """Prune timeseries array to just the production data.

    Refer to equilibration.is_equilibrated for additional information.

    This method returns a list of length 3, where list[0] is the trimmed
    array, list[1] is the calculated statistical inefficiency and list[2] is
    the index of the original dataset where equilibration begins, which can
    be used when subsampling the data using
    `pymbar.timeseries.subsampleCorrelatedData`.

    Refer to https://pymbar.readthedocs.io/en/master/timeseries.html for
    additional information.

    Parameters
    ----------
    a_t : numpy.typing.ArrayLike
        1-D time dependent data to check for equilibration.
    threshold : float, optional, default=0.75
        Fraction of data expected to be equilibrated.
    nskip : int, optional, default=1
        Since the statistical inefficiency is computed for every time origin
        in a call to timeseries.detectEquilibration, for larger datasets
        (> few hundred), increasing nskip might speed this up, while
        discarding more data.
    """
    [truth, t0, g] = is_equilibrated(a_t, threshold=threshold, nskip=nskip)
    if not truth:
        raise ValueError(
            f"Data with a threshold of {threshold} is not equilibrated!"
        )

    return [a_t[t0:], g, t0]
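A sketch of the prune-then-subsample workflow the docstring describes, assuming pymbar 3's camelCase API (the same one used above) and reusing a 1-D array named signal such as the one in the previous sketch:

from pymbar import timeseries

# Drop the non-equilibrated prefix, then keep only decorrelated frames.
prod_data, g, t0 = trim_non_equilibrated(signal, threshold=0.75, nskip=10)
indices = timeseries.subsampleCorrelatedData(prod_data, g=g)
uncorrelated = prod_data[indices]
print(f"{len(uncorrelated)} uncorrelated samples retained after frame {t0}")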
Python
def networkinfo(self):
        '''
            This method returns complete network-related information.
            Call this method as objectName.networkinfo()
        '''
        network_info={}
        ipandmacAddress={}
        ipandmacAddress['HostNodeName']=self.getNetworkName()
        ipandmacAddress['IpAddress']=self.getIpAddress()
        ipandmacAddress['MacAddress']=self.getMacAddress()
        network_info['ipandmacAddress']=[ipandmacAddress]
        network_categories=['netclient','NETPROTOCOL','nic','RDNIC','NICCONFIG']
        for part in network_categories:
            network_info[part]=self.Preprocess(part)
        return network_info
Python
def convert_size(self,size_bytes):
        '''
            Accepts an integer size in bytes and converts it into a
            human-readable string (B, KB, MB, GB, ...).
        '''
        if size_bytes == 0:
            return "0B"
        size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
        i = int(math.floor(math.log(size_bytes, 1024)))
        p = math.pow(1024, i)
        s = round(size_bytes / p, 2)
        return "%s %s" % (s, size_name[i])
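A quick worked example of the helper above; convert_size only relies on math, so any instance of the (unshown) class will do, here called info as a placeholder:

# floor(log_1024(1536)) = 1, so the unit is KB and 1536 / 1024 = 1.5.
print(info.convert_size(1536))           # "1.5 KB"
print(info.convert_size(0))              # "0B"
print(info.convert_size(3 * 1024 ** 3))  # "3.0 GB"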
Python
def GetCount(self):
        '''
            GetCount() returns the file count for every drive on your system.
            Output:
                res --> a list of dictionaries, one per drive, each containing
                        the drive letter and its file count
        '''
        drives=self.getDrives()
        filelist=[]
        res=[]
        for i in drives[1:]:
            result={}
            result['drive']=i
            flist=self.getFileList(i)
            filelist.append(flist)
            result['count']=len(flist)
            res.append(result)
        return res
Python
def GetSystemInfo(self):
        '''
            This method returns a dictionary of system information gathered
            from the Windows Registry and the platform module.
            Call this method as objectname.GetSystemInfo()
        '''
        #Create a Dictionary object for saving all data
        system_data={}
        #Get System information using Registry
        reg_data=['ProductName','InstallDate','PathName','ReleaseId','CompositionEditionID','EditionID','SoftwareType',
                  'SystemRoot','ProductId','BuildBranch','BuildLab','BuildLabEx','CurrentBuild']
        for name in reg_data:
            value=self.get_reg_value(name)
            if name=="CompositionEditionID":
                system_data["CompositionID"]=value
            elif name=="InstallDate":
                system_data[name]=str(datetime.fromtimestamp(value))
            else:
                system_data[name]=value
        #Get system information using platform module
        platform_data=['machine','node','platform','system','release','version','processor']
        platform_name=['Machine Name','Network Name','Platform Type','System Type','Release No ','Version No','Processor Name']
        for idx,name in enumerate(platform_data):
            value=self.getPlatform(name)
            names=platform_name[idx]
            system_data[names]=value
        system_categories=['OS','TIMEZONE','BOOTCONFIG','COMPUTERSYSTEM','STARTUP']
        Final_result={}
        Final_result['SystemData']=[system_data]
        for part in system_categories:
            Final_result[part]=self.Preprocess(part)
        return Final_result
Python
def eval_exec_match(db, p_str, g_str, pred, gold):
    """
    Return whether the execution results of the predicted and gold queries
    match. The result is a 3-tuple: (results match, gold result set is empty,
    predicted query failed to execute). Currently does not support multiple
    col_unit pairs.
    """
    if isinstance(db, str):
        conn = sqlite3.connect(db)
        conn.text_factory = lambda x: str(x, 'latin1')
        cursor = conn.cursor()
    else:
        cursor = db
    # pdb.set_trace()
    def convert_case_insensitive(query):
        new_query = ''
        last_quote = ''
        for char in query:
            new_query += char
            if char in {'"', '\''} and not last_quote:
                last_quote = char
            elif char == last_quote:
                last_quote = ''
                new_query += ' COLLATE NOCASE'
        return new_query

    def convert_p_query(query):
        # query = query.replace('.0','')
        query = query.replace('$$','\'')
        # query = query.replace('DISTINCT','')
        like_vals = re.findall(r'LIKE \'\w+\'',query) + re.findall(r'LIKE \"\w+\"',query)
        if like_vals:
            for val in like_vals:
                query = query.replace(val, 'LIKE \'%{}%\''.format(val[6:-1]))
        new_query = convert_case_insensitive(query)
        return new_query

    try:
        # fix CAST column
        p_str = p_str.replace('cast', '`cast`')
        cursor.execute(convert_p_query(p_str))
        p_res = cursor.fetchall()
    except:
        return False, False, True

    cursor.execute(convert_case_insensitive(g_str))
    q_res = cursor.fetchall()

    def res_map(res, select_units):
        keys = []
        for idx, (agg, val_unit) in enumerate(select_units):
            key = (agg, tuple(val_unit[1])) if not val_unit[2] else (agg, val_unit[0], tuple(val_unit[1]), tuple(val_unit[2]))
            keys.append([key, idx])
        keys = sorted(keys, key=lambda x:str(x[0]))
        res = sorted([tuple([r[idx] for _, idx in keys]) for r in res], key = lambda x:str(x))
        return tuple(res)

    p_select_units = pred['select'][1]
    q_select_units = gold['select'][1]
    # if len(q_res)==0 or (len(q_res)==1 and q_res[0]==(0,)):
    #     # print('run into empty result set:\n{}\n{}'.format(convert_case_insensitive(p_str), convert_case_insensitive(g_str)))
    #     pdb.set_trace()
    #     return False
    # if res_map(p_res, p_val_units) != res_map(q_res, q_val_units):
    #     pdb.set_trace()
    return res_map(p_res, p_select_units) == res_map(q_res, q_select_units), len(q_res)==0 or (len(q_res)==1 and q_res[0]==(0,)), False
Python
def compute_cell_value_linking_v1(tokens, schema, bert_idx_map=None): """check for value linking with cached db column values""" def isnumber(word): try: float(word) return True except: return False num_date_match = {} cell_match = {} for q_id, word in enumerate(tokens): if len(word.strip()) == 0: continue if word in STOPWORDS or word in PUNKS: continue num_flag = isnumber(word) CELL_MATCH_FLAG = "CELLMATCH" for col_id, column in enumerate(schema.columns): if col_id == 0: assert column.orig_name == "*" continue if column.synonym_for is not None: orig_column_id = column.synonym_for.id orig_match_key = f"{q_id},{orig_column_id}" syn_match_key = f"{q_id},{col_id}" if orig_match_key in num_date_match: num_date_match[syn_match_key] = column.type.upper() if orig_match_key in cell_match: cell_match[syn_match_key] = CELL_MATCH_FLAG continue if column.type == "time" and num_flag: # TODO unify date format and enable comparision num_date_match[f"{q_id},{col_id}"] = column.type.upper() elif column.type == "number" and num_flag: num = float(word) min_col_val, max_col_val = column.value_range try: if min_col_val is not None and max_col_val is not None and num >= min_col_val and num <= max_col_val: num_date_match[f"{q_id},{col_id}"] = column.type.upper() except TypeError: pass else: if word in column.value_vocab: cell_match[f"{q_id},{col_id}"] = CELL_MATCH_FLAG cv_link = {"num_date_match": num_date_match, "cell_match" : cell_match} if bert_idx_map is not None: new_cv_link = {} for m_type in cv_link: _match = {} for ij_str in cv_link[m_type]: q_id_str, col_tab_id_str = ij_str.split(",") q_id, col_tab_id = int(q_id_str), int(col_tab_id_str) real_q_id = bert_idx_map[q_id] _match[f"{real_q_id},{col_tab_id}"] = cv_link[m_type][ij_str] new_cv_link[m_type] = _match cv_link = new_cv_link return cv_link
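A small sketch of the interface this linker expects, using types.SimpleNamespace stand-ins for the schema and column objects (the real Schema/Column classes are not shown here) and assuming STOPWORDS and PUNKS are defined at module level as the code requires:

from types import SimpleNamespace

# Column 0 must be the special "*" column; the others carry cached value metadata.
columns = [
    SimpleNamespace(orig_name="*", synonym_for=None),
    SimpleNamespace(orig_name="country", synonym_for=None, type="text",
                    value_range=(None, None), value_vocab={"france", "japan"}),
    SimpleNamespace(orig_name="age", synonym_for=None, type="number",
                    value_range=(18, 99), value_vocab=set()),
]
schema = SimpleNamespace(columns=columns)

tokens = ["singers", "from", "france", "older", "than", "30"]
cv_link = compute_cell_value_linking_v1(tokens, schema)
# Expected shape (provided none of these tokens are stopwords/punctuation):
# {"num_date_match": {"5,2": "NUMBER"}, "cell_match": {"2,1": "CELLMATCH"}}
print(cv_link)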
Python
def compute_cell_value_linking_v2(tokens, schema, bert_idx_map=None): """check for value linking with cached db column values. Match ngrams, include fine link mark like match start""" def isnumber(word): try: float(word) return True except: return False num_date_match = {} cell_match = {} q_id = 0 while q_id < len(tokens): tmp_match = [{},{}] n = 5 while n > 0: if q_id + n <= len(tokens): word = ' '.join(tokens[q_id:q_id+n]) if len(word.strip()) == 0: n -= 1 continue num_flag = isnumber(word) CELL_MATCH_FLAG = "CELLMATCH" # exact match to cell CELL_MATCH_START_FLAG = "CELLMATCHSTART" # exact match to cell, mark start of match CELL_P_MATCH_FLAG = "CELLTOKENMATCH" # match token vocabulary of column for col_id, column in enumerate(schema.columns): if col_id == 0: assert column.orig_name == "*" continue if column.synonym_for is not None: orig_column_id = column.synonym_for.id orig_match_key = f"{q_id},{orig_column_id}" syn_match_key = f"{q_id},{col_id}" if orig_match_key in num_date_match: num_date_match[syn_match_key] = num_date_match[orig_match_key] if orig_match_key in cell_match: cell_match[syn_match_key] = cell_match[orig_match_key] continue if column.type == "time" and num_flag: # TODO unify date format and enable comparision num_date_match[f"{q_id},{col_id}"] = column.type.upper() elif column.type == "number" and num_flag: num = float(word) min_col_val, max_col_val = column.value_range try: if min_col_val is not None and max_col_val is not None and num >= min_col_val and num <= max_col_val: num_date_match[f"{q_id},{col_id}"] = column.type.upper() except TypeError: pass else: if f"{q_id},{col_id}" in cell_match: continue if n>1: if word in column.cell_values: cell_match[f"{q_id},{col_id}"] = CELL_MATCH_START_FLAG if n>1: for m_q_id in range(q_id+1,q_id+n): cell_match[f"{m_q_id},{col_id}"] = CELL_MATCH_FLAG else: if word in STOPWORDS or word in PUNKS: continue if word in column.cell_values: tmp_match[0][f"{q_id},{col_id}"] = CELL_MATCH_START_FLAG elif word in column.tokens: tmp_match[1][f"{q_id},{col_id}"] = CELL_P_MATCH_FLAG n -= 1 if len(tmp_match[0])!=0: for q_col in tmp_match[0]: if q_col not in cell_match: cell_match[q_col] = tmp_match[0][q_col] elif len(tmp_match[0])==0 and len(tmp_match[1])<3: for q_col in tmp_match[1]: if q_col not in cell_match: cell_match[q_col] = tmp_match[1][q_col] q_id += 1 cv_link = {"num_date_match": num_date_match, "cell_match" : cell_match} if bert_idx_map is not None: new_cv_link = {} for m_type in cv_link: _match = {} for ij_str in cv_link[m_type]: q_id_str, col_tab_id_str = ij_str.split(",") q_id, col_tab_id = int(q_id_str), int(col_tab_id_str) real_q_id = bert_idx_map[q_id] _match[f"{real_q_id},{col_tab_id}"] = cv_link[m_type][ij_str] new_cv_link[m_type] = _match cv_link = new_cv_link return cv_link
Python
def _exec_cmd(command):
    """
    Executes a command on the shell and returns its output.

    Note that stderr is redirected into stdout by the Popen call, so the
    second return value is always None.
    :param command: the string of the command to be executed
    :return: lines: decoded lines of the command's combined stdout/stderr,
             stderr: always None (stderr is merged into stdout)
    """
    command_array = shlex.split(command)
    out = subprocess.Popen(command_array, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    stdout, stderr = out.communicate()
    lines_b = stdout.splitlines()
    lines = []
    for line in lines_b:
        lines.append(line.decode("utf-8"))
    return lines, stderr
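A minimal usage sketch; because the Popen call above redirects stderr into stdout, the second return value is always None:

# Run a simple command and print each decoded line of its output.
lines, _ = _exec_cmd("ls -l")
for line in lines:
    print(line)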
Python
def _exec_cmds(commands):
    """
    Executes a pipeline of commands on the shell, piping each command's
    stdout into the next command's stdin, and returns the output of the
    last command.

    Note that stderr is redirected into stdout, so the second return value
    is always None.
    :param commands: list of command strings to be executed as a pipeline
    :return: lines: decoded lines of the final command's output,
             stderr: always None (stderr is merged into stdout)
    """
    out = [None] * len(commands)
    for i, command in enumerate(commands):
        command_array = shlex.split(command)
        if i == 0:
            out[i] = subprocess.Popen(command_array, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        else:
            out[i] = subprocess.Popen(command_array, stdin=out[i - 1].stdout, stdout=subprocess.PIPE,
                                      stderr=subprocess.STDOUT)
    # for i in range(len(commands)):
    #     stdout, stderr = out[i].communicate()
    stdout, stderr = out[len(commands) - 1].communicate()
    lines_b = stdout.splitlines()
    lines = []
    for line in lines_b:
        lines.append(line.decode("utf-8"))
    return lines, stderr
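A sketch of the pipeline form, mirroring how _parse_trace and GetJobTime later in this file use it; the trace path is a placeholder:

# Equivalent of `darshan-parser run.darshan | egrep -v '^(#|$)'` built as a Popen pipeline.
cmds = [
    "darshan-parser run.darshan",  # placeholder trace file
    "egrep -v '^(#|$)'",           # drop comment and blank lines
]
lines, _ = _exec_cmds(cmds)
print(f"{len(lines)} counter lines parsed")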
Python
def _error_str(self): """ Converts error into string concatenated by space. :return: concatenated error with space. """ return ' '.join(self._errors)
Python
def _verify_env_path(self):
        """
        Verifies that the required environment variables and the darshan file exist.
        Environment variables checked are DARSHAN_BIN_DIR and VaniDL_BIN_DIR.
            DARSHAN_BIN_DIR : defines the bin directory of the darshan installation.
            VaniDL_BIN_DIR : defines the bin directory of the VaniDL installation.
        If this returns False, the errors are appended to the _errors class attribute.
        :return: True if all variables exist and False otherwise.
        """
        dxt_file = os.path.exists(self._darshan_file)
        darshan_path = True
        if DARSHAN_DIR not in os.environ:
            darshan_path = False
        else:
            darshan_path = os.path.exists("{}/bin".format(os.environ[DARSHAN_DIR]))
        is_valid = True
        if not dxt_file:
            self._errors.append(str(ErrorCodes.EC1002))
            is_valid = False
        if not darshan_path:
            self._errors.append(str(ErrorCodes.EC1003))
            is_valid = False
        if is_valid:
            self._darshan_bin_dir = "{}/bin".format(os.environ[DARSHAN_DIR])
        return is_valid
Python
def _check_loaded(self): """ Check if the Load() function was called. If it is called the internal attribute _loaded is set. :return: True, if Load() function was called, and False, otherwise. """ if self._loaded: return True return False
Python
def _throw_if_not_loaded(self):
        """
        Throws an exception with Error Code 1001 if the Load() function was not called.
        :raises: Exception with Error Code 1001 if Load() was not called.
        """
        if not self._check_loaded():
            raise Exception(str(ErrorCodes.EC1001))
Python
def _get_darshan_dxt_exe(self): """ Returns path of the Darshan DXT parser executable :return: string of darshan-dxt-parser executable. """ return "{}/darshan-dxt-parser".format(self._darshan_bin_dir)
Python
def _get_darshan_exe(self): """ Returns path of the Darshan parser executable :return: string of darshan-parser executable. """ return "{}/darshan-parser".format(self._darshan_bin_dir)
Python
def _get_darshan_convert_exe(self): """ Returns path of the Darshan convert executable :return: string of darshan-convert executable. """ return "{}/darshan-convert".format(self._darshan_bin_dir)
Python
def _parse_trace(self): """ Parses the darshan trace to get aggregate values. This is useful when we want aggregate values only. Returns ------- dataframe with values of darshan counters. """ cmds = ["{} {}".format(self._get_darshan_exe(), self._darshan_file), "egrep -v '^(#|$)'"] lines, stderr = _exec_cmds(cmds) darshan_map = {} i = 1 pb_total = len(lines) for line in lines: if i % 100 == 0 or i == pb_total: progress(i, pb_total, status='Parsing Darshan File') i += 1 values = line.split() if values[2] not in darshan_map: # create a new record darshan_map[values[2]] = {} darshan_map[values[2]]["Module"] = values[0] darshan_map[values[2]]["Rank"] = values[1] darshan_map[values[2]][values[3]] = values[4] darshan_map[values[2]]["Filename"] = values[5] darshan_map[values[2]]["Mount"] = values[6] darshan_map[values[2]]["FS"] = values[7] else: # update existing darshan_map[values[2]][values[3]] = values[4] df = pd.DataFrame.from_dict(darshan_map, orient='index') return df
Python
def _parse_dxt_trace(self): """ Parses the dxt trace and creates a Pandas Dataframe out of it with all the DXT columns. The dataframe returned has the following columns: ['Module', 'Filename', 'Rank', 'Operation', 'Segment', 'Offset', 'Length', 'Start', 'End'] :return: a dataframe of DXT values. """ cmd = "{} {}".format(self._get_darshan_dxt_exe(), self._darshan_file) lines, stderr = _exec_cmd(cmd) io_lines = False pb_total = len(lines) df = pd.DataFrame(index=numpy.arange(pb_total), columns=['Module', 'Filename', 'Rank', 'Operation', 'Segment', 'Offset', 'Length', 'Start', 'End']) temp_filename = "" i = 1 index = 0 for line in lines: if i % 100 == 0 or i == pb_total: progress(i, pb_total, status='Parsing DXT File') i += 1 if line == '': io_lines = False continue elif "DXT, file_id" in line: temp_filename = line.split(" ")[5] io_lines = False continue elif "Module" in line: io_lines = True elif io_lines: # Module,Rank, Wt/Rd, Segment,Offset,Length,Start(s),End(s) vals = line.split() df.loc[index] = {'Module': vals[0], 'Filename': temp_filename, 'Rank': int(vals[1]), 'Operation': vals[2], 'Segment': int(vals[3]), 'Offset': int(vals[4]), 'Length': int(vals[5]), 'Start': float(vals[6]), 'End': float(vals[7])} index += 1 df = df.drop(df.index[index:]) return df
Python
def _pre_process_df(self, data_paths_include): """ Processes the DXT Dataframe and computes additional columns. Main transformations are: - Change Filename into categorical column. - Compute I/O time based on Start and End. - Compute per operation Bandwidth achieved. - Extract Extension of the filename. - Remove python files which were traced. :param: data_paths_include: paths to include """ # make Filename categorical self._df = self._df.fillna('0') if "POSIX" in self._df['Module'].unique(): self._df = self._df.astype({'POSIX_READS': 'int64', 'POSIX_SEQ_READS': 'int64', 'POSIX_CONSEC_READS': 'int64', 'POSIX_WRITES': 'int64', 'POSIX_SEQ_WRITES': 'int64', 'POSIX_CONSEC_WRITES': 'int64', 'POSIX_BYTES_WRITTEN': 'int64', 'POSIX_BYTES_READ': 'int64', 'POSIX_F_WRITE_TIME': 'float64', 'POSIX_F_READ_TIME': 'float64', 'POSIX_F_META_TIME': 'float64' }, errors='ignore') if "MPIIO" in self._df['Module'].unique(): self._df = self._df.astype({'MPIIO_BYTES_READ': 'int64', 'MPIIO_BYTES_WRITTEN': 'int64', 'MPIIO_F_READ_TIME': 'float64', 'MPIIO_F_WRITE_TIME': 'float64', 'MPIIO_F_META_TIME': 'float64' }, errors='ignore') if "STDIO" in self._df['Module'].unique(): self._df = self._df.astype({'STDIO_BYTES_READ': 'int64', 'STDIO_BYTES_WRITTEN': 'int64', 'STDIO_F_READ_TIME': 'float64', 'STDIO_F_WRITE_TIME': 'float64', 'STDIO_F_META_TIME': 'float64' }, errors='ignore') if "H5D" in self._df['Module'].unique(): self._df = self._df.astype({'H5D_BYTES_READ': 'int64', 'H5D_BYTES_WRITTEN': 'int64', 'H5D_F_READ_TIME': 'float64', 'H5D_F_WRITE_TIME': 'float64', 'H5D_F_META_TIME': 'float64' }, errors='ignore') self._dxt_df["io_time"] = 0 self._dxt_df["bandwidth"] = 0 self._dxt_df["ext"] = 0 if self._dxt_df['Module'].count() > 0: self._dxt_df["Filename"] = self._dxt_df["Filename"].astype('category') self._dxt_df['io_time'] = self._dxt_df['End'] - self._dxt_df['Start'] self._dxt_df.loc[self._dxt_df['io_time'] == 0, 'io_time'] = 0.001 self._dxt_df['bandwidth'] = self._dxt_df['Length'] / self._dxt_df['io_time'] self._dxt_df['ext'] = self._dxt_df.Filename.apply(lambda x: x.split('.')[-1]) #self._dxt_df.loc[self._dxt_df['Filename'].str.contains("\.") == False, 'ext'] = "" self._dxt_df = self._dxt_df[~self._dxt_df['Filename'].str.contains("py")] self._df["Filename"] = self._df["Filename"].astype('category') self._df['ext'] = self._df.Filename.apply(lambda x: x.split('.')[-1]) self._df.loc[self._df['Filename'].str.contains("\.") == False, 'ext'] = "" # remove .py files self._df = self._df[~self._df['Filename'].str.contains("py")] self._df = self._df[~self._df['Filename'].str.contains("<STDERR>")] self._df = self._df[~self._df['Filename'].str.contains("<STDOUT>")] if len(data_paths_include) > 0: # print(len(data_paths_include)) for data_path in data_paths_include: self._dxt_df = self._dxt_df[self._dxt_df['Filename'].str.contains(data_path)] self._df = self._df[self._df['Filename'].str.contains(data_path)]
Python
def _analyze_access_pattern(self): """ This function extracts file access pattern using the darshan utilities. It specifically uses a modified perl script called darshan_job_summary.pl which calculates the access pattern to be sequential, consecutive, or random. :return: a file map containing per file access pattern observed by darshan. """ pattern_file_map = {} for index, row in self._df.iterrows(): file = os.path.splitext(ntpath.basename(row['Filename']))[0] if row['Module'] == "POSIX": pattern_file_map[row['Filename']] = {"name": file, "read": [int(row['POSIX_READS']), int(row['POSIX_SEQ_READS']), int(row['POSIX_CONSEC_READS'])], "write": [int(row['POSIX_WRITES']), int(row['POSIX_SEQ_WRITES']), int(row['POSIX_CONSEC_WRITES'])], "io_bytes": int(row['POSIX_BYTES_WRITTEN']) + int(row['POSIX_BYTES_READ']), "io_time": float(row['POSIX_F_WRITE_TIME']) + float(row['POSIX_F_READ_TIME'])} elif row['Module'] == "MPIIO": pattern_file_map[row['Filename']] = {"name": file, "read": [0, 0, 0], "write": [0, 0, 0], "io_bytes": int(row['MPIIO_BYTES_READ']) + int(row['MPIIO_BYTES_WRITTEN']), "io_time": float(row['MPIIO_F_READ_TIME']) + float(row['MPIIO_F_WRITE_TIME'])} elif row['Module'] == "STDIO": pattern_file_map[row['Filename']] = {"name": file, "read": [0, 0, 0], "write": [0, 0, 0], "io_bytes": int(row['STDIO_BYTES_READ']) + int(row['STDIO_BYTES_WRITTEN']), "io_time": float(row['STDIO_F_READ_TIME']) + float(row['STDIO_F_WRITE_TIME'])} elif row['Module'] == "H5D": pattern_file_map[row['Filename']] = {"name": file, "read": [0, 0, 0], "write": [0, 0, 0], "io_bytes": int(row['H5D_BYTES_READ']) + int(row['H5D_BYTES_WRITTEN']), "io_time": float(row['H5D_F_READ_TIME']) + float(row['H5D_F_WRITE_TIME'])} return pattern_file_map
Python
def _explore_hdf5(self, h5object, name):
        """
        Explores the hdf5 file hierarchically and retrieves all dataset information
        Parameters
        ----------
        h5object: actual h5 object
        name: name for the object
        Returns
        -------
        map of information about the hdf5 object.
        """
        import h5py
        is_dataset = isinstance(h5object, h5py.Dataset)
        is_group = isinstance(h5object, h5py.Group)
        is_file = isinstance(h5object, h5py.File)
        if is_group:
            group_map = {"type": "group",
                         "name": name}
            key_maps = []
            for key in h5object.keys():
                key_map = self._explore_hdf5(h5object[key], key)
                key_maps.append(key_map)
            group_map["keys"] = key_maps
            return group_map
        elif is_file:
            file_map = {"type": "file",
                        "name": name}
            key_maps = []
            for key in h5object.keys():
                key_map = self._explore_hdf5(h5object[key], key)
                key_maps.append(key_map)
            file_map["keys"] = key_maps
            return file_map
        elif is_dataset:
            dataset_map = {"type": "dataset",
                           "name": name,
                           "size": h5object.size,
                           "shape": h5object.shape,
                           "obj": h5object}
            return dataset_map
        else:
            return None
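A short sketch of walking an HDF5 file with the helper above, assuming h5py is installed, that the method is reachable on an analyzer instance named vanidl, and that 'data.h5' is a placeholder path:

import h5py

# Recursively summarize the groups and datasets of an HDF5 file.
with h5py.File("data.h5", "r") as h5_file:  # placeholder file
    tree = vanidl._explore_hdf5(h5_file, "data.h5")

for entry in tree["keys"]:
    if entry["type"] == "dataset":
        print(entry["name"], entry["shape"], entry["size"])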
Python
def _parse_tb_logs(self, tensorflow_logs_dir): """ :param tensorflow_logs_dir, log directory for tensorboard logs. :return JSON of IPA """ from tensorboard_plugin_profile.convert import input_pipeline_proto_to_gviz from tensorboard_plugin_profile.protobuf import input_pipeline_pb2 fileExt = "*input_pipeline.pb" input_pipeline_files = list(pathlib.Path(tensorflow_logs_dir).rglob(fileExt)) ipa_hosts = {} for file in input_pipeline_files: filename = os.path.basename(file) hostname = filename.split(".")[0] ipa_hosts[hostname] = {} ipa_hosts[hostname]['log_file'] = str(file) pb_total = len(ipa_hosts.keys()) i = 1 for key in ipa_hosts.keys(): if i % 100 == 0 or i == pb_total: progress(i, pb_total, status='Reading TB IPA log file') i += 1 ipa = input_pipeline_pb2.InputPipelineAnalysisResult() f = open(ipa_hosts[key]['log_file'], "rb") ipa.ParseFromString(f.read()) (table_description, data, custom_properties) = input_pipeline_proto_to_gviz.get_step_breakdown_table_args( ipa) # print(custom_properties) # print(table_description) ipa_hosts[key]['step_data'] = {} ipa_hosts[key]['step_data']['data'] = [] ipa_hosts[key]['step_data']['custom_properties'] = custom_properties for index, step_data_val in enumerate(data): step_data = {} # print(step_data_val) step_data['stepnum'] = int(step_data_val[0]) step_data['deviceComputeTimeMs'] = float(step_data_val[1]) step_data['deviceToDeviceTimeMs'] = float(step_data_val[2]) step_data['hostComputeTimeMs'] = float(step_data_val[3]) step_data['kernelLaunchTimeMs'] = float(step_data_val[4]) step_data['infeedTimeMs'] = float(step_data_val[5]) step_data['hostComputeTimeMs'] = float(step_data_val[6]) step_data['outfeedTimeMs'] = float(step_data_val[7]) step_data['compileTimeMs'] = float(step_data_val[8]) ipa_hosts[key]['step_data']['data'].append(step_data) (table_description, data, custom_properties) = input_pipeline_proto_to_gviz.get_input_op_table_args(ipa) ipa_hosts[key]['op_data'] = {} ipa_hosts[key]['op_data']['data'] = [] ipa_hosts[key]['op_data']['custom_properties'] = custom_properties for index, op_data_val in enumerate(data): op_data = {} # print(step_data_val) op_data['opName'] = op_data_val[0] op_data['count'] = int(op_data_val[1]) op_data['timeInMs'] = float(op_data_val[2]) op_data['timeInPercent'] = float(op_data_val[3]) op_data['selfTimeInMs'] = float(op_data_val[4]) op_data['selfTimeInPercent'] = float(op_data_val[5]) op_data['category'] = op_data_val[6] ipa_hosts[key]['op_data']['data'].append(op_data) self._tb_input_pipeline = ipa_hosts return self._tb_input_pipeline
Python
def Load(self, darshan_file, preprocessed_dir="/tmp/temp_analysis", data_paths_include=[], tensorflow_logs_dir=None): """ This functions bootstraps the VaniDLr with the given darshan filename Parameters ---------- :param darshan_file: Darshan's DXT trace file. :param preprocessed_dir: full path where post processing checkpoints can be made for faster loading. :param data_paths_include: paths to include for I/O Analysis. :param tensorflow_logs_dir: directory where tensorflow logs are present. :return: Exception with Error code 1000, if darshan file is not passed. Exception with Error code 1002, if darshan file is invalid. Exception with Error code 1003, if environment variable DARSHAN_BIN_DIR is not set correctly. Exception with Error code 1004, if environment variable VaniDL_BIN_DIR is not set correctly. True, if loading was successful """ if darshan_file is None: raise SystemExit(str(ErrorCodes.EC1000)) self._darshan_file = darshan_file self._preprocessed_dir = preprocessed_dir if not self._verify_env_path(): return False if not os.path.exists(preprocessed_dir): os.mkdir(preprocessed_dir) filename = os.path.splitext(ntpath.basename(darshan_file))[0] io_df_dxt_filename = "{}/{}_io_dxt_df.csv".format(preprocessed_dir, filename) if not os.path.exists(io_df_dxt_filename): self._dxt_df = self._parse_dxt_trace() self._dxt_df.to_csv(index=False, path_or_buf=io_df_dxt_filename) else: self._dxt_df = pd.read_csv(io_df_dxt_filename) print("Loaded Pre-processed DXT DF from file: {}".format(io_df_dxt_filename)) io_df_filename = "{}/{}_io_df.csv".format(preprocessed_dir, filename) if not os.path.exists(io_df_filename): self._df = self._parse_trace() self._df.to_csv(index=False, path_or_buf=io_df_filename) else: self._df = pd.read_csv(io_df_filename) print("Loaded Pre-processed DF from file: {}".format(io_df_filename)) self._pre_process_df(data_paths_include) pattern_json = "{}/{}_pattern.json".format(preprocessed_dir, filename) if not os.path.exists(pattern_json): self._file_access_pattern = self._analyze_access_pattern() with open(pattern_json, 'w') as outfile: json.dump(self._file_access_pattern, outfile) else: with open(pattern_json) as json_file: self._file_access_pattern = json.load(json_file) print("Loaded Pre-processed Pattern file: {}".format(pattern_json)) if tensorflow_logs_dir is not None: ipa_json = "{}/tb_ipa.json".format(self._preprocessed_dir) if not os.path.exists(ipa_json): self._tb_input_pipeline = self._parse_tb_logs(tensorflow_logs_dir) with open(ipa_json, 'w') as outfile: json.dump(self._tb_input_pipeline, outfile) else: with open(ipa_json) as json_file: self._tb_input_pipeline = json.load(json_file) print("Loaded Pre-processed Input Analyzer file: {}".format(ipa_json)) self._errors = [] self._loaded = True return True
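An end-to-end sketch tying Load to the accessors defined below; the analyzer class name VaniDL, the trace path, and the data path are placeholders, and the Darshan environment variable checked by _verify_env_path is assumed to be set already:

# Load a DXT-enabled Darshan trace and query a few aggregate metrics.
analyzer = VaniDL()  # hypothetical class exposing the methods in this file
ok = analyzer.Load(
    "logs/app_dxt.darshan",                   # placeholder DXT trace
    preprocessed_dir="/tmp/temp_analysis",
    data_paths_include=["/scratch/dataset"],  # restrict analysis to data paths
)
if ok:
    print("Job runtime (s):", analyzer.GetJobTime())
    print("Total I/O time (s):", analyzer.GetIOTime())
    print(analyzer.GetDXTAsDF().head())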
Python
def GetDXTAsDF(self):
    """
    Get the processed DXT trace as a Pandas DataFrame.
    :return: Pandas DataFrame
    """
    self._throw_if_not_loaded()
    return self._dxt_df
Python
def GetTraceAsDF(self):
    """
    Get the processed Darshan trace as a Pandas DataFrame.
    :return: Pandas DataFrame
    """
    self._throw_if_not_loaded()
    return self._df
Python
def GetJobTime(self):
    """
    Get the total run time of the job.
    :return: time in seconds.
    """
    self._throw_if_not_loaded()
    cmds = ["{} {}".format(self._get_darshan_exe(), self._darshan_file),
            "egrep 'run time'",
            "cut -d' ' -f4"]
    return_val, stderr = _exec_cmds(cmds)
    job_time = float(return_val[0])
    return job_time
Python
def GetIOTime(self, filepath=None, rank=None):
    """
    Returns the total time the job spent in I/O.
    If a file path and/or rank is passed, the data is filtered accordingly.
    :param filepath: Filters data by filename
    :param rank: Filters data by rank
    :return: Returns I/O time in seconds.
    """
    self._throw_if_not_loaded()
    temp_df = self._df
    if self._dxt_df.count()['Module'] == 0:
        if filepath is not None and rank is None:
            temp_df = temp_df[temp_df['Filename'].eq(filepath)]
        if rank is None:
            val = 0
            if "POSIX" in temp_df['Module'].unique():
                val += temp_df['POSIX_F_READ_TIME'].sum() + \
                       temp_df['POSIX_F_WRITE_TIME'].sum() + \
                       temp_df['POSIX_F_META_TIME'].sum()
            if "MPIIO" in temp_df['Module'].unique():
                val += temp_df['MPIIO_F_READ_TIME'].sum() + \
                       temp_df['MPIIO_F_WRITE_TIME'].sum() + \
                       temp_df['MPIIO_F_META_TIME'].sum()
            if "STDIO" in temp_df['Module'].unique():
                val += temp_df['STDIO_F_META_TIME'].sum() + \
                       temp_df['STDIO_F_WRITE_TIME'].sum() + \
                       temp_df['STDIO_F_READ_TIME'].sum()
            if "H5D" in temp_df['Module'].unique():
                val += temp_df['H5D_F_READ_TIME'].sum() + \
                       temp_df['H5D_F_WRITE_TIME'].sum() + \
                       temp_df['H5D_F_META_TIME'].sum()
            return val
    if self._dxt_df.count()['Module'] == 0:
        raise Exception(str(ErrorCodes.EC1010))
    temp_df = self._dxt_df
    if filepath is not None:
        temp_df = temp_df[temp_df['Filename'].eq(filepath)]
    if rank is not None:
        temp_df = temp_df[temp_df['Rank'].eq(rank)]
    return temp_df['io_time'].sum() / temp_df['Rank'].nunique()
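A short sketch of how GetJobTime() and GetIOTime() might be combined to estimate the share of runtime spent in I/O; `analyzer` is assumed to be an instance on which Load() has already succeeded (see the Load sketch above), and the file path and rank are hypothetical.

# `analyzer` is assumed to be a loaded instance (see the Load sketch above).
job_time = analyzer.GetJobTime()
io_time = analyzer.GetIOTime()
print("I/O time: {:.2f}s of {:.2f}s total ({:.1f}%)".format(
    io_time, job_time, 100.0 * io_time / job_time))

# Per-file, per-rank filtering relies on DXT data being present in the trace;
# the path and rank are placeholders.
io_time_rank0 = analyzer.GetIOTime(filepath="/path/to/dataset/file_0.h5", rank=0)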
Python
def GetIOSize(self, filepath=None, rank=None):
    """
    Returns the total I/O performed by the job, in bytes.
    If a file path and/or rank is passed, the data is filtered accordingly.
    :param filepath: Filters data by filename
    :param rank: Filters data by rank
    :return: Returns I/O in bytes.
    """
    self._throw_if_not_loaded()
    temp_df = self._df
    if filepath is not None and rank is None:
        temp_df = temp_df[temp_df['Filename'].eq(filepath)]
    if rank is None:
        val = 0
        if "POSIX" in temp_df['Module'].unique():
            val += temp_df['POSIX_BYTES_WRITTEN'].sum() + \
                   temp_df['POSIX_BYTES_READ'].sum()
        if "MPIIO" in temp_df['Module'].unique():
            val += temp_df['MPIIO_BYTES_READ'].sum() + \
                   temp_df['MPIIO_BYTES_WRITTEN'].sum()
        if "STDIO" in temp_df['Module'].unique():
            val += temp_df['STDIO_BYTES_WRITTEN'].sum() + \
                   temp_df['STDIO_BYTES_READ'].sum()
        if "H5D" in temp_df['Module'].unique():
            val += temp_df['H5D_BYTES_READ'].sum() + \
                   temp_df['H5D_BYTES_WRITTEN'].sum()
        return val
    if self._dxt_df.count()['Module'] == 0:
        raise Exception(str(ErrorCodes.EC1010))
    temp_df = self._dxt_df
    if filepath is not None:
        temp_df = temp_df[temp_df['Filename'].eq(filepath)]
    if rank is not None:
        temp_df = temp_df[temp_df['Rank'].eq(rank)]
    return temp_df['Length'].sum()
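Building on the two calls above, a rough aggregate-bandwidth estimate; again, `analyzer` is an assumed, already-loaded instance.

# `analyzer` is assumed to be a loaded instance (see the Load sketch above).
total_bytes = analyzer.GetIOSize()
total_io_time = analyzer.GetIOTime()
if total_io_time > 0:
    print("Approx. aggregate I/O bandwidth: {:.2f} MiB/s".format(
        total_bytes / total_io_time / 2 ** 20))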
Python
def GetAccessPattern(self, filepath=None):
    """
    Computes the file access pattern for the job.
    If filepath is passed, the data is filtered by that file; otherwise all
    files are aggregated to get the overall access pattern.
    :param filepath: Filters data by filename
    :return: Returns an object describing the I/O access pattern for reads and writes
    """
    self._throw_if_not_loaded()
    if filepath is not None:
        return {"read": {"total_ops": self._file_access_pattern[filepath]["read"][0],
                         "sequential": self._file_access_pattern[filepath]["read"][1],
                         "consecutive": self._file_access_pattern[filepath]["read"][2]
                         },
                "write": {"total_ops": self._file_access_pattern[filepath]["write"][0],
                          "sequential": self._file_access_pattern[filepath]["write"][1],
                          "consecutive": self._file_access_pattern[filepath]["write"][2]
                          }
                }
    else:
        total_seq = [0, 0]
        total_consecutive = [0, 0]
        total_ops = [0, 0]
        for key in self._file_access_pattern:
            total_ops[0] += int(self._file_access_pattern[key]["read"][0])
            total_seq[0] += int(self._file_access_pattern[key]["read"][1])
            total_consecutive[0] += int(self._file_access_pattern[key]["read"][2])
            total_ops[1] += int(self._file_access_pattern[key]["write"][0])
            total_seq[1] += int(self._file_access_pattern[key]["write"][1])
            total_consecutive[1] += int(self._file_access_pattern[key]["write"][2])
        total_ops = numpy.array(total_ops)
        total_seq = numpy.array(total_seq)
        total_consecutive = numpy.array(total_consecutive)
        return {"read": {"total_ops": total_ops[0],
                         "sequential": total_seq[0],
                         "consecutive": total_consecutive[0]
                         },
                "write": {"total_ops": total_ops[1],
                          "sequential": total_seq[1],
                          "consecutive": total_consecutive[1]
                          }
                }
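A sketch that turns the counters returned above into percentages; the keys mirror the dictionary built by GetAccessPattern(), and `analyzer` is an assumed, already-loaded instance.

# `analyzer` is assumed to be a loaded instance (see the Load sketch above).
pattern = analyzer.GetAccessPattern()
for op in ("read", "write"):
    ops = pattern[op]["total_ops"]
    if ops > 0:
        print("{}: {:.1f}% sequential, {:.1f}% consecutive".format(
            op,
            100.0 * pattern[op]["sequential"] / ops,
            100.0 * pattern[op]["consecutive"] / ops))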
Python
def GetFileSizes(self, filepath=None):
    """
    Get the sizes of the files used in the job.
    :param filepath: Filters by filename
    :return: returns a map of file names to sizes in bytes.
    """
    self._throw_if_not_loaded()
    if filepath is not None:
        size = pathlib.Path(filepath).stat().st_size
        return {filepath: size}
    else:
        file_size_map = {}
        for file in self._file_access_pattern:
            if os.path.exists(file):
                size = pathlib.Path(file).stat().st_size
                file = os.path.splitext(ntpath.basename(file))[0]
                file_size_map[file] = float(size)
        return file_size_map
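A sketch summarizing the size map returned above; `analyzer` is an assumed, already-loaded instance.

# `analyzer` is assumed to be a loaded instance (see the Load sketch above).
sizes = analyzer.GetFileSizes()
if sizes:
    largest = max(sizes, key=sizes.get)
    print("{} files, {:.2f} GiB total, largest: {}".format(
        len(sizes), sum(sizes.values()) / 2 ** 30, largest))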
Python
def CreateIOTimeline(self, filepath=None, rank=None, time_step=None, save=True, is_print=True):
    """
    Creates an I/O timeline where, for each timestep, the number of operations
    and the amount of I/O are computed.
    If filepath is set, the data is filtered by filepath.
    If rank is set, the data is filtered by rank.
    If time_step is not set, it is auto-tuned to the mean io_time within the data.
    :param filepath: filters the data by filename
    :param rank: filters the data by rank
    :param time_step: sets the size of each timestep.
    :param save: if True, caches the computed timeline as a CSV checkpoint.
    :param is_print: if True, prints a message when a cached timeline is loaded.
    :return: A dataframe with columns ['time_step', 'operation_count', 'io_bytes',
             'read_bytes', 'read_count', 'write_bytes', 'write_count']
    """
    self._throw_if_not_loaded()
    if self._dxt_df.count()['Module'] == 0:
        raise Exception(str(ErrorCodes.EC1010))
    temp_df = self._dxt_df
    trace_filename = os.path.splitext(ntpath.basename(self._darshan_file))[0]
    tm_df_filename = "{}/{}_tm_df".format(self._preprocessed_dir, trace_filename)
    if filepath is not None:
        temp_df = temp_df[temp_df['Filename'].eq(filepath)]
        filename = os.path.splitext(ntpath.basename(filepath))[0]
        tm_df_filename = "{}_{}".format(tm_df_filename, filename)
    if rank is not None:
        temp_df = temp_df[temp_df['Rank'].eq(rank)]
        tm_df_filename = "{}_{}".format(tm_df_filename, rank)
    tm_df_filename = "{}.csv".format(tm_df_filename)
    if os.path.exists(tm_df_filename):
        df_time = pd.read_csv(tm_df_filename)
        if is_print:
            print("Loaded Pre-processed Timeline from file: {}".format(tm_df_filename))
        return df_time
    min_time = round(0, 3)
    max_time = round(self.GetJobTime(), 3)
    if temp_df['End'].max() > max_time:
        max_time = temp_df['End'].max()
    if time_step is None:
        time_step = round(temp_df['io_time'].mean(), 3)
    data_points = math.ceil((max_time - min_time) / time_step)
    data_points_series = numpy.arange(0, data_points, 1)
    count_series = numpy.zeros(data_points)
    read_series = numpy.zeros(data_points)
    read_bytes_series = numpy.zeros(data_points)
    sum_series = numpy.zeros(data_points)
    write_series = numpy.zeros(data_points)
    write_bytes_series = numpy.zeros(data_points)
    pb_total = temp_df.count()['Module']
    i = 1
    for index, row in temp_df.iterrows():
        if i % 100 == 0 or i == pb_total:
            progress(i, pb_total, status='Creating Timeline')
        i += 1
        start_index = math.floor(float(row['Start']) / time_step)
        end_index = math.ceil(float(row['End']) / time_step)
        for n in numpy.arange(start_index, end_index, 1):
            if row['Operation'] == "read":
                read_series[n] += 1
                read_bytes_series[n] += float(row['Length'])
            elif row['Operation'] == "write":
                write_series[n] += 1
                write_bytes_series[n] += float(row['Length'])
            count_series[n] += 1
            sum_series[n] += float(row['Length'])
    df_time = pd.DataFrame(
        {'time_step': data_points_series,
         'operation_count': count_series,
         'io_bytes': sum_series,
         'read_bytes': read_bytes_series,
         'read_count': read_series,
         'write_bytes': write_bytes_series,
         'write_count': write_series})
    if save:
        df_time.to_csv(index=False, path_or_buf=tm_df_filename)
    return df_time
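A sketch that builds a timeline and locates the busiest bucket; the 1-second time step is an arbitrary choice, and `analyzer` is an assumed, already-loaded instance with DXT data present.

# `analyzer` is assumed to be a loaded instance (see the Load sketch above).
timeline = analyzer.CreateIOTimeline(time_step=1.0)   # 1-second buckets (arbitrary)
peak = timeline.loc[timeline['io_bytes'].idxmax()]
print("Peak I/O at timestep {:.0f}: {:.2f} MiB across {:.0f} ops".format(
    peak['time_step'], peak['io_bytes'] / 2 ** 20, peak['operation_count']))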
Python
def GetIORequestDistribution(self, filepath=None, rank=None, operation=None, bins=100, threshold=AUTO):
    """
    Returns a series of value counts over binned I/O request sizes.
    If filepath is passed, the data is filtered by filename.
    bins decides the number of buckets on the x-axis of the histogram, and
    threshold can be used to drop bins with fewer counts than the given value.
    By default the threshold is set to 1/1000 of the total number of requests.
    :param filepath: filters the data by filepath
    :param rank: filters the data by rank
    :param operation: filters the data by operation
    :param bins: sets the bins for the histogram
    :param threshold: sets the count threshold below which bins are ignored
    :return: a Pandas Series which can be plotted using its plot function.
    """
    self._throw_if_not_loaded()
    if self._dxt_df.count()['Module'] == 0:
        raise Exception(str(ErrorCodes.EC1010))
    temp_df = self._dxt_df
    if filepath is not None:
        temp_df = temp_df[temp_df['Filename'].eq(filepath)]
    if rank is not None:
        temp_df = temp_df[temp_df['Rank'].eq(rank)]
    if operation is not None:
        temp_df = temp_df[temp_df['Operation'].eq(operation)]
    counts = temp_df['Length'].value_counts(bins=bins)
    if threshold is AUTO:
        threshold = temp_df['Length'].count() * .001
    counts = counts[counts > threshold]
    return counts.sort_index()
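A sketch printing the dominant read request sizes from the histogram above; `analyzer` is an assumed, already-loaded instance with DXT data present, and the bin count is arbitrary.

# `analyzer` is assumed to be a loaded instance (see the Load sketch above).
read_hist = analyzer.GetIORequestDistribution(operation="read", bins=50)
for interval, count in read_hist.items():
    print("{:>24}: {:d} requests".format(str(interval), int(count)))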