Dataset columns: language (stringclasses, 6 values), original_string (stringlengths, 25 to 887k), text (stringlengths, 25 to 887k)
Python
def trello_model_id_put_with_http_info(self, model, id, **kwargs):  # noqa: E501
    """Updates the models currently in db.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.trello_model_id_put_with_http_info(model, id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str model: (required)
    :param str id: (required)
    :return: UniversalResource
             If the method is called asynchronously,
             returns the request thread.
    """

    all_params = ['model', 'id']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method trello_model_id_put" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'model' is set
    if ('model' not in params or
            params['model'] is None):
        raise ValueError("Missing the required parameter `model` when calling `trello_model_id_put`")  # noqa: E501
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `trello_model_id_put`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'model' in params:
        path_params['model'] = params['model']  # noqa: E501
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/trello/{model}/{id}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='UniversalResource',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
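A minimal usage sketch for the generated client method above; the TrelloApi and ApiClient names and the argument values are hypothetical stand-ins for whatever this swagger-codegen client actually exposes. The call pattern follows the doctest in the docstring. Note that async became a reserved word in Python 3.7, so code using the async keyword argument targets older interpreters (newer generators rename the flag to async_req).

# Hypothetical usage sketch; TrelloApi, ApiClient and the arguments are assumed names.
api = TrelloApi(ApiClient())

# synchronous call: returns the response data directly
resource = api.trello_model_id_put_with_http_info('card', '5a1b2c3d')

# asynchronous call: returns a thread-like object; fetch the result with .get()
thread = api.trello_model_id_put_with_http_info('card', '5a1b2c3d', async=True)
resource = thread.get()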
Python
def trello_post(self, **kwargs):  # noqa: E501
    """trello_post  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.trello_post(async=True)
    >>> result = thread.get()

    :param async bool
    :param str key:
    :param str token:
    :param TrelloQuery query:
    :return: int
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return self.trello_post_with_http_info(**kwargs)  # noqa: E501
    else:
        (data) = self.trello_post_with_http_info(**kwargs)  # noqa: E501
        return data
Python
def trello_post_with_http_info(self, **kwargs):  # noqa: E501
    """trello_post  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.trello_post_with_http_info(async=True)
    >>> result = thread.get()

    :param async bool
    :param str key:
    :param str token:
    :param TrelloQuery query:
    :return: int
             If the method is called asynchronously,
             returns the request thread.
    """

    all_params = ['key', 'token', 'query']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method trello_post" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}

    path_params = {}

    query_params = []
    if 'key' in params:
        query_params.append(('key', params['key']))  # noqa: E501
    if 'token' in params:
        query_params.append(('token', params['token']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'query' in params:
        body_params = params['query']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/trello/', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='int',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
Python
def trello_put(self, **kwargs):  # noqa: E501
    """trello_put  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.trello_put(async=True)
    >>> result = thread.get()

    :param async bool
    :param str key:
    :param str token:
    :return: int
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return self.trello_put_with_http_info(**kwargs)  # noqa: E501
    else:
        (data) = self.trello_put_with_http_info(**kwargs)  # noqa: E501
        return data
Python
def trello_put_with_http_info(self, **kwargs):  # noqa: E501
    """trello_put  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.trello_put_with_http_info(async=True)
    >>> result = thread.get()

    :param async bool
    :param str key:
    :param str token:
    :return: int
             If the method is called asynchronously,
             returns the request thread.
    """

    all_params = ['key', 'token']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method trello_put" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}

    path_params = {}

    query_params = []
    if 'key' in params:
        query_params.append(('key', params['key']))  # noqa: E501
    if 'token' in params:
        query_params.append(('token', params['token']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/trello/', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='int',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
Python
def content_get(self, path, **kwargs):  # noqa: E501
    """content_get  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.content_get(path, async=True)
    >>> result = thread.get()

    :param async bool
    :param str path: Relative path to file (required)
    :return: ContentFile
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return self.content_get_with_http_info(path, **kwargs)  # noqa: E501
    else:
        (data) = self.content_get_with_http_info(path, **kwargs)  # noqa: E501
        return data
Python
def content_get_with_http_info(self, path, **kwargs):  # noqa: E501
    """content_get  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.content_get_with_http_info(path, async=True)
    >>> result = thread.get()

    :param async bool
    :param str path: Relative path to file (required)
    :return: ContentFile
             If the method is called asynchronously,
             returns the request thread.
    """

    all_params = ['path']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method content_get" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'path' is set
    if ('path' not in params or
            params['path'] is None):
        raise ValueError("Missing the required parameter `path` when calling `content_get`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []
    if 'path' in params:
        query_params.append(('path', params['path']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/content/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ContentFile',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
Python
def content_post(self, contentfile, **kwargs):  # noqa: E501
    """content_post  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.content_post(contentfile, async=True)
    >>> result = thread.get()

    :param async bool
    :param ContentFile contentfile: (required)
    :return: UniversalResource
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return self.content_post_with_http_info(contentfile, **kwargs)  # noqa: E501
    else:
        (data) = self.content_post_with_http_info(contentfile, **kwargs)  # noqa: E501
        return data
Python
def content_post_with_http_info(self, contentfile, **kwargs):  # noqa: E501
    """content_post  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.content_post_with_http_info(contentfile, async=True)
    >>> result = thread.get()

    :param async bool
    :param ContentFile contentfile: (required)
    :return: UniversalResource
             If the method is called asynchronously,
             returns the request thread.
    """

    all_params = ['contentfile']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method content_post" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'contentfile' is set
    if ('contentfile' not in params or
            params['contentfile'] is None):
        raise ValueError("Missing the required parameter `contentfile` when calling `content_post`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'contentfile' in params:
        body_params = params['contentfile']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/content/', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='UniversalResource',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
Python
def content_put(self, path, contentfile, **kwargs):  # noqa: E501
    """content_put  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.content_put(path, contentfile, async=True)
    >>> result = thread.get()

    :param async bool
    :param str path: Relative path to file (required)
    :param ContentFile contentfile: (required)
    :return: UniversalResource
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return self.content_put_with_http_info(path, contentfile, **kwargs)  # noqa: E501
    else:
        (data) = self.content_put_with_http_info(path, contentfile, **kwargs)  # noqa: E501
        return data
Python
def content_put_with_http_info(self, path, contentfile, **kwargs):  # noqa: E501
    """content_put  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.content_put_with_http_info(path, contentfile, async=True)
    >>> result = thread.get()

    :param async bool
    :param str path: Relative path to file (required)
    :param ContentFile contentfile: (required)
    :return: UniversalResource
             If the method is called asynchronously,
             returns the request thread.
    """

    all_params = ['path', 'contentfile']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method content_put" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'path' is set
    if ('path' not in params or
            params['path'] is None):
        raise ValueError("Missing the required parameter `path` when calling `content_put`")  # noqa: E501
    # verify the required parameter 'contentfile' is set
    if ('contentfile' not in params or
            params['contentfile'] is None):
        raise ValueError("Missing the required parameter `contentfile` when calling `content_put`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []
    if 'path' in params:
        query_params.append(('path', params['path']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'contentfile' in params:
        body_params = params['contentfile']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/content/', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='UniversalResource',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
Python
def memoize(permanent_cache=None):
    """Cache the return value of the decorated method.

    :param permanent_cache: a `dict` like object to use as a cache.
        If not given, the `._cache` attribute would be added to
        the object of the decorated method pointing to a newly
        created `dict`.
    :return: decorated function
    """
    def decorator(method):
        @wraps(method)
        def wrapped(self, *args, **kwargs):
            if permanent_cache is None:
                try:
                    cache = self._cache
                except AttributeError:
                    cache = self._cache = {}
            else:
                cache = permanent_cache
            method_cache = cache.setdefault(method, {})
            key = frozenset(args), frozenset(kwargs)
            try:
                return method_cache[key]
            except KeyError:
                rv = method(self, *args, **kwargs)
                method_cache[key] = rv
                return rv
        return wrapped
    return decorator
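A minimal usage sketch for the memoize decorator above; the Geometry class and its method are hypothetical. One caveat worth noting: the cache key is built from frozenset(args) and frozenset(kwargs), so positional argument order is not distinguished (area(3, 4) and area(4, 3) share a cache entry) and keyword arguments are keyed by name only.

from functools import wraps  # memoize expects wraps to be in scope in its module

class Geometry(object):  # hypothetical class for illustration
    @memoize()
    def area(self, width, height):
        print('computing...')
        return width * height

g = Geometry()
g.area(3, 4)  # prints 'computing...' and returns 12
g.area(3, 4)  # returns 12 from g._cache without recomputing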
Python
def driver(self):
    """Return human-friendly driver information

    :returns: driver information as string
    """
    return '<libvirt>'
Python
def uuid(self, identity):
    """Get computer system UUID

    The universal unique identifier (UUID) for this system. Can be used
    in place of system name if there are duplicates.

    :param identity: libvirt domain name or UUID

    :returns: computer system UUID
    """
    domain = self._get_domain(identity, readonly=True)
    return domain.UUIDString()
Python
def name(self, identity):
    """Get computer system name

    :param identity: libvirt domain name or UUID

    :returns: computer system name
    """
    domain = self._get_domain(identity, readonly=True)
    return domain.name()
Python
def _process_bios_attributes(self,
                             domain_xml,
                             bios_attributes=DEFAULT_BIOS_ATTRIBUTES,
                             update_existing_attributes=False):
    """Process Libvirt domain XML for BIOS attributes

    This method supports adding default BIOS attributes,
    retrieving existing BIOS attributes and
    updating existing BIOS attributes.

    This method is introduced to make the XML testable; otherwise one
    would have to compare XML strings to test whether the XML saved to
    libvirt is as expected.

    Sample of custom XML:
    <domain type="kvm">
    [...]
      <metadata xmlns:sushy="http://openstack.org/xmlns/libvirt/sushy">
        <sushy:bios>
          <sushy:attributes>
            <sushy:attribute name="ProcTurboMode" value="Enabled"/>
            <sushy:attribute name="BootMode" value="Uefi"/>
            <sushy:attribute name="NicBoot1" value="NetworkBoot"/>
            <sushy:attribute name="EmbeddedSata" value="Raid"/>
          </sushy:attributes>
        </sushy:bios>
      </metadata>
    [...]

    :param domain_xml: Libvirt domain XML to process
    :param bios_attributes: BIOS attributes for updates or
        default values if not specified
    :param update_existing_attributes: Update existing BIOS attributes

    :returns: namedtuple of tree: processed XML element tree,
        attributes_written: if changes were made to XML,
        bios_attributes: dict of BIOS attributes
    """
    namespace = 'http://openstack.org/xmlns/libvirt/sushy'
    ET.register_namespace('sushy', namespace)
    ns = {'sushy': namespace}

    tree = ET.fromstring(domain_xml)
    metadata = tree.find('metadata')
    if metadata is None:
        metadata = ET.SubElement(tree, 'metadata')
    bios = metadata.find('sushy:bios', ns)
    attributes_written = False
    if bios is not None and update_existing_attributes:
        metadata.remove(bios)
        bios = None
    if bios is None:
        bios = ET.SubElement(metadata, '{%s}bios' % (namespace))
        attributes = ET.SubElement(bios, '{%s}attributes' % (namespace))
        for key, value in sorted(bios_attributes.items()):
            ET.SubElement(attributes,
                          '{%s}attribute' % (namespace),
                          name=key,
                          value=value)
        attributes_written = True

    bios_attributes = {atr.attrib['name']: atr.attrib['value']
                       for atr in tree.find('.//sushy:attributes', ns)}

    return BiosProcessResult(tree, attributes_written, bios_attributes)
Python
def _process_bios(self, identity,
                  bios_attributes=DEFAULT_BIOS_ATTRIBUTES,
                  update_existing_attributes=False):
    """Process Libvirt domain XML for BIOS attributes and update it if necessary

    :param identity: libvirt domain name or ID
    :param bios_attributes: Full list of BIOS attributes to use if
        they are missing or an update is necessary
    :param update_existing_attributes: Update existing BIOS attributes

    :returns: New or existing dict of BIOS attributes

    :raises: `error.FishyError` if BIOS attributes cannot be saved
    """
    domain = self._get_domain(identity)

    result = self._process_bios_attributes(domain.XMLDesc(),
                                           bios_attributes,
                                           update_existing_attributes)

    if result.attributes_written:
        try:
            with libvirt_open(self._uri) as conn:
                conn.defineXML(ET.tostring(result.tree).decode('utf-8'))
        except libvirt.libvirtError as e:
            msg = ('Error updating BIOS attributes'
                   ' at libvirt URI "%(uri)s": '
                   '%(error)s' % {'uri': self._uri, 'error': e})
            raise error.FishyError(msg)

    return result.bios_attributes
Python
def grid_conf():
    """Pytest fixture for grid configuration dictionary"""
    grid = {"N": 8, "r_min": 0, "r_max": 0.1}
    return Grid(grid)
Python
def copyText(self):
    """Copy the selected text from the editor to the clipboard."""
    self.text.clipboard_clear()
    selected = self.text.get("sel.first", "sel.last")
    self.text.clipboard_append(selected)
Python
def pasteText(self):
    """Paste the clipboard text into the editor."""
    copied = self.text.selection_get(selection='CLIPBOARD')
    self.text.insert('insert', copied)
Python
def cutText(self):
    """Cut the selected text from the editor."""
    self.copyText()
    self.text.delete("sel.first", "sel.last")
Python
def clearText(self):
    """Clear all text from the editor."""
    self.text.delete('1.0', 'end')
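A minimal sketch of wiring the four editor methods above to a Tkinter Edit menu; the Editor class owning the self.text widget is hypothetical.

import tkinter as tk

root = tk.Tk()
editor = Editor(root)  # hypothetical class owning the self.text widget

menu = tk.Menu(root)
edit_menu = tk.Menu(menu, tearoff=0)
edit_menu.add_command(label='Copy', command=editor.copyText)
edit_menu.add_command(label='Paste', command=editor.pasteText)
edit_menu.add_command(label='Cut', command=editor.cutText)
edit_menu.add_command(label='Clear', command=editor.clearText)
menu.add_cascade(label='Edit', menu=edit_menu)
root.config(menu=menu)
root.mainloop()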
Python
def load_yaml(yamlpath: str):
    """
    Loads a yaml file from a path.

    :param yamlpath: Path to yaml settings file
    :returns: dict settings object
    """
    yamlpath_full = Path(yamlpath).absolute()
    with open(yamlpath_full, 'r', encoding="utf-8") as stream:
        try:
            outdict = yaml.safe_load(stream)
            return outdict
        except yaml.YAMLError as exc:
            print(exc)
            raise RuntimeError(f"Could not load yaml file at {yamlpath}")
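A minimal usage sketch for load_yaml; the settings file name and key are hypothetical. The function depends on pathlib.Path and PyYAML being imported in its module.

settings = load_yaml('settings.yaml')  # hypothetical file
print(settings.get('export_dir'))      # hypothetical key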
Python
def load_properties(multiline, sep='=', comment_char='#', keys=None):
    """
    Read a multiline string of properties (key/value pair separated by *sep*) into a dict

    :param multiline: input string of properties
    :param sep: separator between key and value
    :param comment_char: lines starting with this char are considered comments, not key/value pairs
    :param keys: list to append the keys to
    :return: dict of properties
    """
    props = {}
    for line in multiline.splitlines():
        stripped_line = line.strip()
        if stripped_line and not stripped_line.startswith(comment_char):
            key_value = stripped_line.split(sep)
            key = key_value[0].strip()
            value = sep.join(key_value[1:]).strip().strip('"')
            props[key] = value
            if keys is not None:
                keys.append(key)
    return props
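A minimal usage sketch for load_properties, showing comment handling, quote stripping, and the optional keys list; the property names are hypothetical.

props_text = '''
# database settings
host = localhost
port = "5432"
'''
keys = []
props = load_properties(props_text, keys=keys)
# props == {'host': 'localhost', 'port': '5432'}  (surrounding quotes stripped)
# keys  == ['host', 'port']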
Python
def copy_logfile(logcopy: logging.Logger, dest_dir, case_code: str = None) -> Path:
    """Copies the log file from logcopy to the output directory dest_dir.

    Will prefix the file name with case_code if given.
    """
    Path(dest_dir).mkdir(parents=False, exist_ok=True)
    if case_code is not None:
        new_logname = f"{case_code}_{Path(logcopy.root.handlers[0].baseFilename).name}"
    else:
        new_logname = Path(logcopy.root.handlers[0].baseFilename).name
    copyout = shutil.copy2(logcopy.root.handlers[0].baseFilename,
                           Path(dest_dir, new_logname))
    logcopy.info(f"Copied log file to {copyout}")
    return Path(copyout)
Python
def write_to_file(filename, content, mode, file_time=None):
    """Helper function that persists content to file."""
    with open(filename, mode) as f:
        f.write(content)
    if file_time:
        utime(filename, (file_time, file_time))
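A minimal usage sketch for write_to_file; the file name and content are hypothetical, and utime is assumed to be imported from os, as the function requires.

import time
from os import utime  # dependency of write_to_file

write_to_file('activity.json', '{"activityId": 1}', 'w',
              file_time=time.time() - 3600)  # backdate mtime by one hour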
Python
def parse_arguments() -> Dict:
    """
    Setup the argument parser and parse the command line arguments.
    """
    current_date = datetime.now().strftime('%Y-%m-%d')
    # default export directory, relative to the current working directory
    activities_directory = Path.cwd().joinpath(current_date + '_garmin_connect_export')

    parser = argparse.ArgumentParser(description='Garmin Connect Exporter')

    parser.add_argument('--version', action='version', version='%(prog)s ' + __version__,
                        help='print version and exit')
    parser.add_argument('-v', '--verbosity', action='count',
                        help='increase output verbosity')
    parser.add_argument('--username',
                        help='your Garmin Connect username or email address (otherwise, you will be prompted)')
    parser.add_argument('--password',
                        help='your Garmin Connect password (otherwise, you will be prompted)')
    parser.add_argument('-c', '--count', default='1',
                        help="number of recent activities to download, or 'all' or 'new' (default: 1), "
                             "'new' or 'number' downloads the latest activities by activity's date/time")
    parser.add_argument('-e', '--external',
                        help='path to external program to pass CSV file to')
    parser.add_argument('-a', '--external_args',
                        help='additional arguments to pass to external program')
    parser.add_argument('-f', '--format', choices=['gpx', 'tcx', 'original', 'json'], default='gpx',
                        help="export format; can be 'gpx', 'tcx', 'original' or 'json' (default: 'gpx')")
    parser.add_argument('-d', '--directory', default=activities_directory,
                        help='the directory to export to (default: \'./YYYY-MM-DD_garmin_connect_export\')')
    parser.add_argument('-s', '--subdir',
                        help='the subdirectory for activity files (tcx, gpx etc.), supported placeholders are {YYYY} and {MM}'
                             ' (default: export directory)')
    parser.add_argument('-u', '--unzip', action='store_true',
                        help='if downloading ZIP files (format: \'original\'), unzip the file and remove the ZIP file')
    parser.add_argument('-ot', '--originaltime', action='store_true',
                        help='will set downloaded (and possibly unzipped) file time to the activity start time')
    parser.add_argument('--desc', type=int, nargs='?', const=0, default=None,
                        help='append the activity\'s description to the file name of the download; limit size if number is given')
    parser.add_argument('-t', '--template', default=CSV_TEMPLATE,
                        help='template file with desired columns for CSV output')
    parser.add_argument('-fp', '--fileprefix', action='count',
                        help='set the local time as activity file name prefix')

    parsed_args = vars(parser.parse_args())
    return parsed_args
Python
def logging_verbosity(verbosity):
    """Adapt logging verbosity, separately for logfile and console output"""
    logger = logging.getLogger()
    if not isinstance(verbosity, int):
        verbosity = 2
    for handler in logger.handlers:
        if isinstance(handler, logging.FileHandler):
            # this is the logfile handler
            level = logging.DEBUG if verbosity > 0 else logging.INFO
            handler.setLevel(level)
            logging.info('New logfile level: %s', logging.getLevelName(level))
        elif isinstance(handler, logging.StreamHandler):
            # this is the console handler
            level = logging.DEBUG if verbosity > 1 else (logging.INFO if verbosity > 0 else logging.WARN)
            handler.setLevel(level)
            logging.debug('New console log level: %s', logging.getLevelName(level))
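A minimal sketch of the handler layout logging_verbosity expects: a root logger with one FileHandler (logfile) and one StreamHandler (console); the logfile name is hypothetical. The FileHandler check has to come first in the function because FileHandler is a subclass of StreamHandler.

import logging

root = logging.getLogger()
root.setLevel(logging.DEBUG)
root.addHandler(logging.FileHandler('export.log'))  # hypothetical logfile
root.addHandler(logging.StreamHandler())            # console output

logging_verbosity(2)  # logfile -> DEBUG, console -> DEBUG
logging_verbosity(0)  # logfile -> INFO,  console -> WARN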
Python
def login(self, session: requests.Session, username: str, password: str) -> str:
    """
    Perform all HTTP requests to login to Garmin Connect.
    """
    username = username or input('Username: ')
    password = password or getpass()

    logger.info('Connecting to Garmin Connect...')
    logger.info('Connecting to %s', self.urls["LOGIN"])
    connect_response = session.get(self.urls["LOGIN"], params=LOGIN_PARAMS)

    # Fields that are passed in a typical Garmin login.
    post_data = {
        'username': username,
        'password': password,
        'embed': 'false',
        'rememberme': 'on'
    }
    headers = {
        'referer': self.urls["LOGIN"]
    }

    logger.info('Requesting Login ticket...')
    login_req = session.post(self.urls["LOGIN"], params=LOGIN_PARAMS,
                             data=post_data, headers=headers)

    # Extract the ticket from the login response
    pattern = re.compile(r".*\?ticket=([-\w]+)\";.*", re.MULTILINE | re.DOTALL)
    match = pattern.match(login_req.text)
    if not match:
        raise RuntimeError('Couldn\'t find ticket in the login response. Cannot log in. '
                           'Did you enter the correct username and password?')
    login_ticket = match.group(1)
    print(' Done. Ticket=' + login_ticket)

    print("Authenticating...", end='')
    logging.info(f"Authentication URL {self.urls['POST_AUTH']}, ticket={login_ticket}")
    session.get(self.urls["POST_AUTH"], params={'ticket': login_ticket})

    return login_ticket
Python
def extract_device(self, device_dict: Dict, activity_details: Dict):
    """
    Try to get the device activity_details (and cache them, as they're used for multiple activities)
    """
    if "metadataDTO" not in activity_details:
        logging.warning("no metadataDTO")
        return None
    else:
        metadata = activity_details['metadataDTO']
        device_app_inst_id = metadata.get('deviceApplicationInstallationId')
        if device_app_inst_id is not None:
            if device_app_inst_id not in device_dict:
                # observed from my stock of activities:
                # activity_details['metadataDTO']['deviceMetaDataDTO']['deviceId'] == null -> device unknown
                # activity_details['metadataDTO']['deviceMetaDataDTO']['deviceId'] == '0' -> device unknown
                # activity_details['metadataDTO']['deviceMetaDataDTO']['deviceId'] == 'someid' -> device known
                device_dict[device_app_inst_id] = None
                device_meta = metadata.get('deviceMetaDataDTO')
                device_id = device_meta.get('deviceId')
                if 'deviceId' not in device_meta or device_id and device_id != '0':
                    device_json_req = self.session.get(self.urls["DEVICE"] + str(device_app_inst_id))
                    # export_dir.joinpath(f"device_{device_app_inst_id}.json").write_text(device_json_req.text)
                    if device_json_req.ok is False:
                        logging.warning(f"Device Details {device_app_inst_id} are empty")
                        device_dict[device_app_inst_id] = "device-id:" + str(device_app_inst_id)
                    else:
                        device_details = json.loads(device_json_req.text)
                        if 'productDisplayName' in device_details:
                            device_dict[device_app_inst_id] = device_details['productDisplayName'] + ' ' \
                                + device_details['versionString']
                        else:
                            logging.warning(f"Device activity_details {device_app_inst_id} incomplete")
            return device_dict[device_app_inst_id]
        else:
            return None
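The device cache is meant to be shared across all activities of an export run. A sketch of the intended call pattern (the names `exporter` and `all_activity_details` are illustrative only):

device_cache = {}
for details in all_activity_details:
    device = exporter.extract_device(device_cache, details)
    # e.g. 'Forerunner 235 7.10', 'device-id:123456', or None
    print(details.get('activityId'), '->', device)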
def load_gear(self, activity_id: str):
    """ Retrieve the gear/equipment for an activity """
    gear_req = self.session.get(f"{self.urls['GEAR']}/{activity_id}")
    if not gear_req.ok:
        logger.debug(f"Unable to get gear for activity_id {activity_id}")
        return None
    gear = json.loads(gear_req.text)
    if not gear:
        return None
    gear_display_name = gear[0].get('displayName')
    gear_model = gear[0].get('customMakeModel')
    logger.debug("Gear for %s = %s/%s", activity_id, gear_display_name, gear_model)
    return gear_display_name if gear_display_name else gear_model
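Hypothetical usage (assumes `exporter` holds an authenticated session and a `GEAR` entry in its `urls` mapping):

gear = exporter.load_gear('1234567890')
if gear:
    print('Activity gear:', gear)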
def download_activity(self, activity_details: Dict, start_time_locale, format: str = "ORIGINAL"):
    """ Write the data of the activity to a file, depending on the chosen data format """
    allowed_formats = ["ORIGINAL", "GPX", "JSON"]
    if format not in allowed_formats:
        raise ValueError(f"format '{format}' not recognised. Must be one of {allowed_formats}")
    activity_id = activity_details["activityId"]
    download_url = f"{self.urls[f'{format.upper()}_ACTIVITY']}/{activity_id}"
    download_params = {"full": "true"}
    start_time_locale = get_valid_filename(start_time_locale)  # Remove illegal characters
    if format != "ORIGINAL":
        download_filename = self.export_dir.joinpath(f"{start_time_locale}_{activity_id}.{format}")
    else:
        download_filename = self.export_dir.joinpath(f"{start_time_locale}_{activity_id}.zip")
    if download_filename.exists():
        logger.debug(f"Skipping already-existing file: {download_filename}")
        return False
    if format != 'JSON':
        dl_req = self.session.get(download_url, params=download_params)
        # Handle expected (though unfortunate) error codes; die on unexpected ones.
        if dl_req.status_code == 404 and format == "ORIGINAL":
            # For manual activities (i.e., entered online without a file upload), there is
            # no original file. Write an empty file to prevent redownloading it.
            logger.info('Writing empty file since there was no original activity data...')
            raw_data = b''
        elif not dl_req.ok:
            raise RuntimeError(f"Failed. Got an HTTP error {dl_req.status_code} for {download_url}")
        else:
            raw_data = dl_req.content
    else:
        # The JSON payload is the activity details we already hold; serialize them to bytes.
        raw_data = json.dumps(activity_details).encode('utf-8')
    download_filename.write_bytes(raw_data)  # Persist file
    if format == 'ORIGINAL':
        # Even a manual upload of a GPX file comes zipped, so unzip and rename by extension.
        file_size = download_filename.stat().st_size
        logger.debug(f"Unzipping original file, size is {file_size}")
        if file_size > 0:
            with zipfile.ZipFile(download_filename) as zip_obj:
                for name in zip_obj.namelist():
                    unzipped_name = Path(zip_obj.extract(name, self.export_dir))
                    file_type = unzipped_name.suffix
                    new_name = download_filename.with_suffix(file_type)
                    logger.debug(f"Renaming {unzipped_name} to {new_name}")
                    unzipped_name.rename(new_name)
        else:
            logger.warning(f"Skipping 0Kb zip file for activity_id {activity_id}")
            download_filename.unlink()
    return True
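A sketch of an export loop over already-fetched activity summaries (`exporter` and the `startTimeLocal` field name are assumptions based on the surrounding code; adjust to the actual activity JSON):

for activity in activities:
    written = exporter.download_activity(activity, activity['startTimeLocal'], format='GPX')
    if not written:
        print('Already exported:', activity['activityId'])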
def main(**kwargs):
    """ Main entry point for gcexport.py """
    setup_logging()
    if kwargs:
        args = kwargs
        for key, value in kwargs.items():
            logger.debug(f"arg : {key} | value : {value}")
    else:
        args = parse_arguments()
    # Apply the requested verbosity whichever way the arguments came in.
    logging_verbosity(args.get("verbosity"))
    print('Welcome to Garmin Connect Exporter!')

    # Create directory for data files.
    export_dir = Path(args.get("export_dir"))
    export_dir.mkdir(parents=True, exist_ok=True)
    export_csv = export_dir.joinpath("activities.csv")

    garmin_connect = GarminConnect(username=args.get("username"),
                                   password=args.get("password"),
                                   export_dir=export_dir)
    activities = garmin_connect.get_activities(count="all")
    # pprint() prints and returns None, so log with pformat (from the pprint module) instead.
    logger.debug(pformat(garmin_connect.userstats, width=200))
    logger.info(f"Export completed to {export_dir}")
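Calling main programmatically instead of through the CLI (keyword names assumed to mirror those produced by parse_arguments):

main(username='jane@example.com',
     password=None,  # prompted interactively by login()
     export_dir='./garmin-export',
     verbosity=1)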
def args_2_list_files(args):
    """Gather file names for ref. sequences."""
    if len(args) > 1:
        lst_files = args
    elif os.path.isdir(args[0]):
        lst_files = [os.path.join(args[0], f) for f in os.listdir(args[0])]
    else:
        lst_files = args
    return lst_files
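Example behavior: a single directory argument expands to its entries, while explicit file arguments pass through unchanged:

args_2_list_files(['refs/'])          # e.g. ['refs/a.fa', 'refs/b.fa']
args_2_list_files(['a.fa', 'b.fa'])   # ['a.fa', 'b.fa']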
def natsortkey(*args, rev_ix=()):
    """Natural sort key for strings.

    For example, exon12 would come before exon2 with a regular
    lexicographic sort; with natural sort the order is exon2, exon12.
    Positions listed in `rev_ix` are sorted in reverse order.
    """

    class reversor:
        def __init__(self, obj):
            self.obj = obj

        def __eq__(self, other):
            return other.obj == self.obj

        def __lt__(self, other):
            return self.obj > other.obj

    convert = lambda text: int(text) if text.isdigit() else text.lower()
    alphanum_split = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
    split = lambda *l: tuple(alphanum_split(x) for x in l)
    reverse = lambda l, ix: tuple(reversor(x) if i in ix else x for i, x in enumerate(l))

    return reverse(split(*args), rev_ix)
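Usage illustration (standalone, assuming only `re` is imported):

names = ["exon12", "exon2", "exon1"]
print(sorted(names, key=natsortkey))  # ['exon1', 'exon2', 'exon12']

rows = [("chr1", "exon2"), ("chr1", "exon12")]
# Ascending on column 0, descending on column 1:
print(sorted(rows, key=lambda r: natsortkey(*r, rev_ix=[1])))
# [('chr1', 'exon12'), ('chr1', 'exon2')]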
def _get_paths(self, start, w):
    """Follow path from start until exhaustion of all kmers.

    Arguments
    ---------
    start : int
        ID of initial node to start with.
    w : numpy.ndarray
        A 2-dimensional array containing the weight of the edge
        between each pair of nodes.

    Returns
    -------
    numpy.ndarray
        List of preceding nodes as dictated by the weights
    """

    # a list of kmers (indices) and their corresponding
    # predecessors (values)
    prev = np.empty(self.n, dtype=np.int32)
    prev.fill(-1)

    # dist will contain the cumulative weights of forward
    # kmers as they appear during kmer walking
    dist = np.empty(self.n, dtype=np.float32)
    dist.fill(np.inf)

    unvisited = np.ones(self.n, dtype=bool)

    def visit(i):
        # ndist is the weights of all kmers forward of `i` plus
        # the cumulative weight accumulated at kmer `i`
        ndist = w[i, :] + dist[i]
        # Relax: only update forward kmers whose new cumulative
        # weight is lower than the best seen so far. A kmer already
        # reached through a variant edge is overwritten when a
        # reference edge offers a lower weight, and its predecessor
        # becomes the current kmer `i`.
        ind = ndist < dist
        dist[ind] = ndist[ind]
        prev[ind] = i

    # ensure we start with the start kmer; at this point dist
    # is filled with +np.inf except for `start` which is 0
    dist[start] = 0
    while unvisited.any():
        # select the unvisited kmer with the smallest cumulative
        # weight from previous iterations
        unv_ix = np.where(unvisited)[0]
        i = unv_ix[dist[unv_ix].argmin()]
        # find and annotate forward kmers for kmer `i`
        visit(i)
        unvisited[i] = False

    return prev
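For intuition, the relaxation above is Dijkstra's algorithm over a dense weight matrix. A self-contained sketch of the same loop, detached from the class:

import numpy as np

def dijkstra_prev(w, start):
    """Plain Dijkstra on a dense weight matrix `w` (np.inf = no edge);
    returns each node's predecessor on the cheapest path from `start`."""
    n = w.shape[0]
    prev = np.full(n, -1, dtype=np.int32)
    dist = np.full(n, np.inf, dtype=np.float32)
    dist[start] = 0
    unvisited = np.ones(n, dtype=bool)
    while unvisited.any():
        ix = np.where(unvisited)[0]
        i = ix[dist[ix].argmin()]          # cheapest unvisited node
        relax = w[i, :] + dist[i] < dist   # edges that improve a path
        dist[relax] = (w[i, :] + dist[i])[relax]
        prev[relax] = i
        unvisited[i] = False
    return prev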
def _get_shortest(self, a, b):
    """Return the shortest path passing through edge (a, b)"""

    def follow(start, prev):
        cur = start
        while prev[cur] != -1:
            cur = prev[cur]
            path.append(cur)

    path = [b, a]
    follow(a, self.before)
    path.reverse()
    follow(b, self.after)

    # Only keep paths from source to sink
    if path[0] != self.first_node or path[-1] != self.last_node:
        path = None

    return path
def all_shortest(self): """Find every unique path that can be constructed from a non-reference edge by following the paths in `before` and `after`. Returns ------- list List of unique shortest paths walked from all edges """ log.info("%d edges in non-ref edge set.", len(self.edge_set)) all_paths = set() for (i, j) in self.edge_set: log.debug("Computing shortest path through edge: (%d, %d)", i, j) path = self._get_shortest(i, j) if path: all_paths.add(tuple(path)) return list(all_paths)
def graph_analysis(self):
    """Perform kmer walking and find alternative paths

    Generate a 2-dimensional graph with all kmers queried at
    initialization. Nodes represent individual kmers and weighted
    edges are used to do the walking by prioritizing alternative
    paths.
    """

    self.paths = []

    # Initialize graph
    graph = ug.Graph(self.num_k)

    # For all kmers, find the next kmers with k - 1 overlap
    # and assign a weight of 1
    weight = 1
    # Match contiguous kmers: kmer j can follow kmer i when the
    # (k-1)-suffix of i equals the (k-1)-prefix of j.
    prefix_dct = {}
    for i in range(self.num_k):
        prefix = self.kmer[i][:-1]
        prefix_dct.setdefault(prefix, set()).add(i)
    for i in range(self.num_k):
        suffix = self.kmer[i][1:]
        for j in prefix_dct.get(suffix, ()):
            if i != j:  # skip self-loops
                graph[i, j] = weight

    # Change the weights for contiguous kmers in reference
    # from 1 to 0.01 in graph
    weight = 0.01

    def adjust_graph_weights(ref_index):
        for k in range(len(ref_index) - 1):
            i = ref_index[k]
            j = ref_index[k + 1]
            graph[i, j] = weight

    adjust_graph_weights(self.refpath.seq_index)

    first_ix = self.kmer.index(self.first_seq)
    for start_ix in self.start_kmers_ix:
        graph[first_ix, start_ix] = weight
    last_ix = self.kmer.index(self.last_seq)
    for end_ix in self.end_kmers_ix:
        graph[end_ix, last_ix] = weight

    # Initialize paths and remove reference edges
    graph.init_paths(first_ix, last_ix)

    # Locate shortest paths from non-reference edges
    short_paths = graph.all_shortest()
    # remove capping nodes from paths
    short_paths = [tuple(p[1:-1]) for p in short_paths]
    self.alt_paths = [us.AltSeq(s, self) for s in short_paths]

    # Group alternative paths with the same origin (ref_name) together
    alt_groups = {}
    for path in self.alt_paths:
        alt_groups.setdefault(path.ref_name, []).append(path)
    self.alt_groups = alt_groups
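The contiguity test used above is a (k-1)-overlap lookup: bucket kmers by prefix, then probe each kmer's suffix. A standalone illustration:

kmers = ['ACGT', 'CGTA', 'GTAC']
prefix = {}
for j, km in enumerate(kmers):
    prefix.setdefault(km[:-1], set()).add(j)
for i, km in enumerate(kmers):
    for j in prefix.get(km[1:], ()):  # successors of kmer i
        if i != j:
            print(kmers[i], '->', kmers[j])
# ACGT -> CGTA
# CGTA -> GTAC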
def quantify_paths(self, graphical=False):
    """Quantify paths independently.

    Go through the shortest paths one by one and quantify their
    expression. The result of the quantification method is used in
    the final output. After quantification, paths are formatted and
    compiled into their final form for printing.

    Parameters
    ----------
    graphical : bool, optional
        If True, generate a plot showing kmer coverage.
    """

    if graphical:
        import matplotlib.pyplot as plt

        def plot(paths):
            plt.figure(figsize=(10, 6))
            for path in paths:
                ref_name, ref_index, alt_index = path.ref_name, path.ref_index, path.seq_index
                plt.plot(
                    self.get_counts(alt_index),
                    label=self.get_name(
                        ref_index, alt_index
                    ).replace("\t", " ") + ' (%s)' % ref_name
                )
            plt.legend()
            plt.show()

        for paths in self.alt_groups.values():
            plot(paths)

    for path in self.alt_paths:
        ref_name, ref_index, alt_index = path.ref_name, path.ref_index, path.seq_index

        quant = upq.PathQuant(
            all_paths=[alt_index, ref_index],
            counts=self.counts
        )

        quant.compute_coef()
        quant.refine_coef()
        quant.get_ratio()

        # Reference
        if alt_index == ref_index:
            quant.adjust_for_reference()

        rvaf, ref_rvaf = quant.rVAF
        coef, ref_coef = quant.coef

        path_o = upq.Path(
            self.jf.filename,
            ref_name,
            self.get_name(ref_index, alt_index),
            rvaf,
            coef,
            min(self.get_counts(alt_index)),
            0,
            self.get_seq(alt_index, skip_prefix=False),
            ref_rvaf,
            ref_coef,
            self.get_seq(ref_index, skip_prefix=False),
            "vs_ref"
        )

        self.paths.append(path_o)
def _find_clusters(self, alt_paths):
    """Generate clusters by cutting the sequence around mutations,
    considering overlapping mutations as a single cluster.
    """

    variant_diffs = []
    variant_set = set(range(len(alt_paths)))
    for path in alt_paths:
        diff = self.diff_path_without_overlap(
            path.ref_index, path.seq_index, self.jf.k
        )
        variant_diffs.append(diff)

    def get_intersect(start, stop):
        for var in variant_set:
            cur_start = variant_diffs[var].start
            cur_end = variant_diffs[var].end_ref
            if cur_end >= start and cur_start <= stop:
                if start == stop == cur_start == cur_end:
                    log.info('Terminal ITD ignored in cluster mode.')
                elif stop == cur_end and (start == stop or cur_start == cur_end):
                    # meaning one is the reference and the other ends at
                    # the reference end, which can happen when the ITD
                    # instantly checks at end extremities because it
                    # ends at `< end - k`
                    log.info('Quasi-terminal ITD ignored in cluster mode.')
                else:
                    return var
        return -1

    variant_groups = []
    while len(variant_set) > 0:
        seed = variant_set.pop()
        grp = [seed]
        start = variant_diffs[seed].start
        stop = variant_diffs[seed].end_ref
        variant = get_intersect(start, stop)
        while variant != -1:
            variant_set.remove(variant)
            grp += [variant]
            start = min(start, variant_diffs[variant].start)
            stop = max(stop, variant_diffs[variant].end_ref)
            variant = get_intersect(start, stop)
        variant_groups.append((start, stop, grp))

    # all alternative paths in this group share the same reference
    ref_index = alt_paths[0].ref_index
    ref_name = alt_paths[0].ref_name

    for var_gr in variant_groups:
        start, stop, grp_ixs = var_gr
        if len(grp_ixs) == 1:
            var = grp_ixs[0]
            path = alt_paths[var]
            if path.seq_index == path.ref_index:
                continue

        var_diffs = [variant_diffs[v] for v in grp_ixs]
        var_size = max(
            [abs(d.end_var - d.end_ref + 1) for d in var_diffs]
        )
        offset = max(0, start - var_size)
        ref_path = tuple(ref_index[offset:stop])
        clipped_paths = []
        for var in grp_ixs:
            cur_diff = variant_diffs[var]
            stop_off = cur_diff.end_var + stop - cur_diff.end_ref
            new_path = tuple(alt_paths[var][offset:stop_off])
            clipped_paths.append(new_path)

        yield (ref_name, ref_path, clipped_paths, offset)
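The seed-and-grow loop above is, in essence, overlapping-interval merging over [start, end_ref] windows (plus the ITD special cases). A simplified standalone version of just the merging idea, which sorts instead of growing a window to a fixed point:

def merge_overlapping(intervals):
    """Merge overlapping (start, stop) intervals and record the
    member indices of each merged window."""
    order = sorted(range(len(intervals)), key=lambda i: intervals[i])
    merged = []
    for i in order:
        start, stop = intervals[i]
        if merged and start <= merged[-1][1]:
            prev_start, prev_stop, members = merged[-1]
            merged[-1] = (prev_start, max(prev_stop, stop), members + [i])
        else:
            merged.append((start, stop, [i]))
    return merged

print(merge_overlapping([(5, 9), (8, 12), (20, 25)]))
# [(5, 12, [0, 1]), (20, 25, [2])]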
def quantify_clusters(self, graphical=False):
    """Detect and quantify cluster groups.

    In some cases, the complete sequence will contain at least 2
    homozygous mutations, which causes the overall minimum coverage
    to be 0 for all paths. By defining clusters, we only keep the
    part of the sequence where there is only one mutation.
    Additionally, when multiple mutations overlap, they are grouped
    together into a cluster in order to be quantified as a single
    group.

    Then, go through the clusters and quantify them in groups. The
    rest is similar to `quantify_paths`. After quantification, paths
    are formatted and compiled into their final form for printing.

    Parameters
    ----------
    graphical : bool, optional
        If True, generate a plot showing kmer coverage for each cluster.
    """

    clusters = []
    for common_ref in sorted(self.alt_groups.keys(), key=uc.natsortkey):
        alt_paths = self.alt_groups[common_ref]
        for cluster in self._find_clusters(alt_paths):
            clusters.append(cluster)

    if graphical:
        import matplotlib.pyplot as plt

    for i, cluster in enumerate(clusters):
        ref_name, ref_path, clipped_paths, start_off = cluster
        num_cluster = i + 1

        if graphical:
            plt.figure(figsize=(10, 6))
            plt.plot(
                self.get_counts(ref_path),
                label="Reference"
            )
            for path in clipped_paths:
                assert path != ref_path
                plt.plot(
                    self.get_counts(path),
                    label=self.get_name(
                        ref_path, path, start_off
                    ).split("\t")[0]
                )
            plt.legend()
            plt.show()

        quant = upq.PathQuant(
            all_paths=[ref_path] + clipped_paths,
            counts=self.counts
        )

        quant.compute_coef()
        quant.refine_coef()
        quant.get_ratio()

        ref_rvaf, paths_rvaf = quant.rVAF[0], quant.rVAF[1:]
        ref_coef, paths_coef = quant.coef[0], quant.coef[1:]

        for path, rvaf, coef in zip(clipped_paths, paths_rvaf, paths_coef):
            assert path != ref_path
            path_o = upq.Path(
                self.jf.filename,
                ref_name,
                self.get_name(ref_path, path, start_off),
                rvaf,
                coef,
                min(self.get_counts(path)),
                start_off,
                self.get_seq(path, skip_prefix=False),
                ref_rvaf,
                ref_coef,
                self.get_seq(ref_path, skip_prefix=False),
                "cluster %d n=%d" % (num_cluster, len(clipped_paths))
            )

            self.paths.append(path_o)
def query(self, seq):
    """Fetch the kmer count from the jellyfish database."""
    kmer = jellyfish.MerDNA(seq)
    if self.canonical:
        kmer.canonicalize()
    return self.jf[kmer]
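Hedged usage sketch (assumes the dna-jellyfish bindings used above, and that `db` is an instance of this class with `jf` and `canonical` set; the query length must match the database's k):

count = db.query('ACGTACGTACGTACGTACGTA')
print('kmer coverage:', count)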
def str_fields(self, tab_count=1):
    # type: (int) -> str
    """
    Convert the list of fields into a string, indenting each line with
    the appropriate number of tabs on the left.

    :param tab_count: Number of tabs to insert on the left
    :return: The concatenated string, or 'pass' if there are no fields
    """
    field_str_list = []
    for field in self.fields:
        field_str_list.append((tab_count * TAB) + str(field))

    # Return the concatenated string; an empty class body must still
    # be valid Python, hence the 'pass' fallback.
    return '\n'.join(field_str_list) if field_str_list else 'pass'
def _likely_classes(module_globals):
    # type: (ModuleGlobals) -> Generator[str, None, None]
    """
    Extract likely Protobuf class names from all global variables

    * Ignore any global variable starting with `_`.
    * Should not be `sys` or `DESCRIPTOR`.

    :param module_globals: Astroid module globals mapping
    :return: Generator yielding likely Protobuf class names
    """
    # A list of ignored global variable names
    ignored_vars = {'sys', 'DESCRIPTOR'}

    # Run through the global variables, eliminating all those
    # that don't meet our criteria.
    for k in module_globals:
        if k.startswith('_') or k in ignored_vars:
            continue

        logging.debug('Yielding likely class %s', k)
        yield k
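A quick illustration with a fake globals mapping (a plain dict stands in for astroid's globals table):

fake_globals = {'sys': [], 'DESCRIPTOR': [], '_PERSON': [], 'Person': []}
print(list(_likely_classes(fake_globals)))  # ['Person']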
def _extract_enum_field(call):
    # type: (astroid.Call) -> Optional[PBField]
    """
    Extract enum fields from the `EnumValueDescriptor` astroid object

    :param call: Reference to the `astroid.Call` object
    :return: A `PBField` object on success, None on failure
    """
    field_name = None
    field_number = None

    for keyword in call.keywords:
        if keyword.arg == 'name':
            field_name = getattr(keyword.value, 'value', None)
        elif keyword.arg == 'number':
            field_number = next(keyword.value.infer()).value

    # Sanity check: were we able to extract both the name and the value?
    if field_name is None:
        logging.warning('Unable to extract enum field name: %s', call)
        return None

    if field_number is None:
        logging.warning('Unable to extract enum field value: %s', call)
        return None

    # Done, return the field
    return PBEnumField(field_name, False, field_number)
def _extract_custom_field_value(field_name, classes, module):
    # type: (str, List[PBClass], astroid.Module) -> Optional[PBClass]
    """
    2nd Pass: Populate the custom Protobuf classes in the message fields

    :param field_name: Name of the field
    :param classes: List of `PBClass` objects
    :param module: Parent module object given to us by PyLint
    :return: The matching `PBClass` on success, None on failure
    """
    for node in module.body:
        # Filter out any non-assignment nodes
        if not isinstance(node, astroid.Assign):
            logging.debug('Node is not of type Assign: %s', node)
            continue

        # Get the target from the assignment node
        if not node.targets:
            logging.debug('Node target length is too short: %s', node)
            continue
        target_node = node.targets[0]
        if not isinstance(target_node, astroid.AssignAttr):
            logging.debug('Node target is not AssignAttr: %s', target_node)
            continue

        # Get the expression from the target
        expr_node = target_node.expr
        if not isinstance(expr_node, astroid.Subscript):
            logging.debug('Node target expr is not Subscript: %s', expr_node)
            continue

        # Get the slice from the expression
        slice_node = expr_node.slice
        if not isinstance(slice_node, astroid.Index):
            logging.debug('Node slice is not Index: %s', slice_node)
            continue

        # Only a constant value is expected inside the slice node
        if not isinstance(slice_node.value, astroid.Const):
            logging.debug('Slice node is not a Const: %s', slice_node.value)
            continue

        # Get the value out of this (assigned class)
        expr_value = expr_node.value
        if not isinstance(expr_value, astroid.Attribute):
            logging.debug('Expr value is not an Attribute: %s', expr_value)
            continue
        expr_expr = expr_value.expr
        if not isinstance(expr_expr, astroid.Name):
            logging.debug('Expr expr is not a Name: %s', expr_expr)
            continue

        # Extract the field name present in the slice node
        slice_node_name = slice_node.value.value
        if not isinstance(slice_node_name, str):
            logging.warning("Slice name is not a str: %s", slice_node_name)
            continue
        if field_name != slice_node_name:
            logging.debug('Slice node name %s does '
                          'not match field name %s'
                          '', slice_node_name, field_name)
            continue

        # Extract the descriptor name for this field name
        field_desc_name = node.value.name

        # Get the PBClass object for this field from its descriptor,
        # falling back to None instead of raising StopIteration.
        return next((c for c in classes if c.desc_name == field_desc_name), None)

    # Unfortunately unable to determine the complex type
    logging.warning('Unable to determine the complex'
                    ' type for field: %s', field_name)
    return None
def _extract_message_field(classes, module, call):
    # type: (List[PBClass], astroid.Module, astroid.Call) -> Optional[PBField]
    """
    Extract a message field from the `FieldDescriptor` call

    :param classes: List of all `PBClass` objects
    :param module: Reference to `astroid.Module` object
    :param call: An `astroid.Call` belonging to `FieldDescriptor` call
    :return: PBField for the corresponding field
    """
    field_name = None
    field_pb_type = None
    field_default = None
    for keyword in call.keywords:
        if keyword.arg == 'name':
            field_name = getattr(keyword.value, 'value', None)
        elif keyword.arg == 'type':
            field_pb_type = getattr(keyword.value, 'value', None)
        elif keyword.arg == 'default_value':
            field_default = next(keyword.value.infer())

    if field_name is None:
        logging.debug("Unable to find field name: %s", call.keywords)
        return None
    if field_pb_type is None:
        logging.debug("Unable to find field type: %s", call.keywords)
        return None
    if field_default is None:
        logging.debug("Unable to find field default: %s", call.keywords)
        return None

    if field_pb_type in COMPLEX_FIELDS:
        field_value = _extract_custom_field_value(field_name, classes, module)  # type: Optional[PBClass]
        field_type = PBFieldType.COMPLEX
    else:
        try:
            field_value = FIELD_TYPES[field_pb_type]  # type: Scalar
        except KeyError:
            logging.debug("Unknown field type: %s", field_pb_type)
            return None
        field_type = PBFieldType.SCALAR

    # We're done inferring about this field.
    return PBMessageField(field_name, _is_repeated_field(field_default),
                          field_type, field_value)
def _extract_desc_fields(classes, module, parent_call, class_type):
    # type: (List[PBClass], astroid.Module, astroid.Call, PBClassType) -> List[PBField]
    """
    Extract fields present in a given `astroid.Call` object for a
    `Descriptor` call

    :param classes: List of `PBClass` objects
    :param module: Reference to `astroid.Module` object
    :param parent_call: Creation of descriptor via `astroid.Call`
    :param class_type: Class type of the parent descriptor
    :return: Extracted fields present in the protobuf class
    """
    # The call to create a field object descriptor will have
    # `fields` for messages and `values` for enums.
    if class_type == PBClassType.MESSAGE:
        kw_name = 'fields'
    elif class_type == PBClassType.ENUM:
        kw_name = 'values'
    else:
        logging.warning("Unknown Class Type: %s", class_type)
        return []

    # Extract the values passed to the `fields`/`values` keyword
    # in the parent call that creates the descriptor.
    kw_val = next((kw for kw in parent_call.keywords if kw.arg == kw_name), None)
    if kw_val is None:
        logging.warning('No `%s` keyword found in the call: %s', kw_name, parent_call)
        return []
    if not isinstance(kw_val, astroid.Keyword):
        logging.warning('kw_val is not a Keyword object: %s', kw_val)
        return []
    if not isinstance(kw_val.value, astroid.List):
        logging.warning('Fields value is not a List: %s', kw_val.value)
        return []

    # Extract the list of calls for each entry in the `kw_name` values
    calls = kw_val.value.elts
    fields = []
    for call in calls:
        if class_type == PBClassType.ENUM:
            enum_field = _extract_enum_field(call)  # type: Optional[PBField]
            if enum_field:
                fields.append(enum_field)
        else:
            # This is a message field
            msg_field = _extract_message_field(classes, module, call)  # type: Optional[PBField]
            if msg_field:
                fields.append(msg_field)

    # Done, return the extracted fields
    return fields
def _populate_fields(classes, module):
    # type: (List[PBClass], astroid.Module) -> None
    """
    Populate all the fields in the messages.

    :param classes: List of `PBClass` objects
    :param module: Reference to `astroid.Module` object
    :return: Nothing
    """
    # Module globals
    module_globals = module.globals

    # Enumerate through the classes
    for cls in classes:
        # Get the assignment for the descriptor
        try:
            assign_line = module_globals[cls.desc_name]  # type: List[astroid.Assign]
        except KeyError:
            logging.error('Descriptor %s for class'
                          ' %s was not found', cls.desc_name, cls.name)
            continue

        # The assignment line should have at least one assignment
        if not assign_line:
            logging.warning('Descriptor assignment %s '
                            'has no nodes', cls.desc_name)
            continue

        # Get the assignment node itself
        assign_node = assign_line[0]
        if not isinstance(assign_node, astroid.AssignName):
            logging.warning('Descriptor assign_node is '
                            'not AssignName: %s', assign_node)
            continue

        # Parent should be an `astroid.Assign` object
        if not isinstance(assign_node.parent, astroid.Assign):
            logging.warning('Descriptor assign_node parent '
                            'is not Assign: %s', assign_node.parent)
            continue
        call_node = assign_node.parent.value
        if not isinstance(call_node, astroid.Call):
            logging.warning('Assign\'s value is not a Call: %s', call_node)
            continue

        # Extract the fields and append them to the class
        fields = _extract_desc_fields(classes, module, call_node, cls.type)
        cls.fields.extend(fields)
def _transform(node):
    # type: (astroid.Module) -> astroid.Module
    """
    Callback function registered with PyLint to transform a particular node.

    :param node: An `astroid.Module` node
    :return: The rebuilt `astroid.Module` node
    """
    # Build a mapping of all the classes in the Protobuf module.
    # First identify the classes and their descriptors
    classes = _extract_classes(node.globals)

    # Populate the fields in these classes
    _populate_fields(classes, node)

    # Generate the import statements
    imports_str = '\n'.join('import ' + imp for imp in IMPORTS)

    # Generate the classes corresponding to protobuf messages & enums
    classes_str = '\n\n'.join(str(cls) for cls in classes)

    # Combine the above two to create code
    code = '\n\n'.join([imports_str, classes_str])

    # Copy some fields from the old node to the new node
    new_node = astroid.parse(code)
    new_node.name = node.name
    new_node.doc = node.doc
    new_node.file = node.file
    new_node.path = node.path
    new_node.package = node.package
    new_node.pure_python = node.pure_python
    new_node.parent = node.parent

    # Log the generated module instead of printing it to stdout
    logging.debug(new_node.as_string())

    # Return the new node created by us
    return new_node
def _looks_like_pb2(node):
    # type: (astroid.Module) -> bool
    """
    Predicate function that determines when PyLint has to call our
    plugin's `_transform` callback on a node.

    :param node: An `astroid.Module` node
    :return: True if the node is a generated `_pb2` module we should transform
    """
    # Keep a list of ignored `_pb2` module names
    ignored = {
        'google.protobuf.descriptor_pb2'
    }

    # Filter out everything that doesn't end with `_pb2`
    return node.qname().endswith("_pb2") and node.qname() not in ignored
def register(_): # type: (Any) -> None """ Register this plugin with the PyLint framework and perform any initialization needed by this plugin """
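The empty register hook is common for astroid-based plugins: the transform is usually wired at import time instead. A sketch of how that registration typically looks with astroid's transform API (placed at module level of the plugin):

import astroid

astroid.MANAGER.register_transform(astroid.Module, _transform, _looks_like_pb2)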
def fmt_phone(ph):
    """ Format a (French) phone number """
    ph = re.sub(r'^(\+?33\s(?:\(?0\)?))', '0', ph)
    return re.sub(r'(?<=\d)[-. ](?=\d)', '.', ph).strip()
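Examples of the normalisation (both forms collapse to dotted groups):

print(fmt_phone('+33 (0)1 23 45 67 89'))  # 01.23.45.67.89
print(fmt_phone('01-23-45-67-89'))        # 01.23.45.67.89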
def text(el): """ Helper to get the text content of a BeautifulSoup item """ return el.get_text().strip()
def mk_fuzzy(p):
    """ Return the 'fuzzy' field of a person dict. This is a string
    containing various versions of the person's name for easier searches. """
    els = []
    els.append(p['name'])
    els.append(unidecode(p['name']))
    if 'url' in p:
        urlname = re.search(r'/~(\w+)', p['url'])
        if urlname:
            els.append(urlname.group(1))
    # siglum, e.g. Foo Bar-Qux -> FBQ
    sig = ''.join(re.findall(u'[A-ZÉ]', p['name']))
    if sig:
        els.append(sig)
    # join with a non-\w symbol to avoid cross-matching
    return ' # '.join(els)
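Example of the generated fuzzy string (accents stripped, URL handle and siglum appended; the transliterated form assumes unidecode's standard mapping):

p = {'name': u'Élise Foo-Bar', 'url': 'http://example.org/~efb/'}
print(mk_fuzzy(p))
# Élise Foo-Bar # Elise Foo-Bar # efb # ÉFB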
def soup_url(url):
    """ Get an HTML document from a URL and return its (beautiful) soup """
    req = Request(url, headers={'User-Agent': 'p7pp/web'})
    html = urlopen(req).read()
    return BeautifulSoup(html, "lxml")
def mk_people_key(org, url): """ Make a key for a person, using their org and their webpage URL """ path = urlparse(url).path m = re.match(r'^/~(\w+)', path) key = m.group(1) if m else re.sub(r'\W', '_', path) return "people.%s.%s" % (org, key)
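Examples (the second shows the fallback when the path has no ~user handle):

print(mk_people_key('pps', 'http://example.org/~jdoe/'))       # people.pps.jdoe
print(mk_people_key('liafa', 'http://example.org/pages/a-b'))  # people.liafa._pages_a_b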
def parse_liafa():
    """ Return a dict of people from LIAFA. """
    print("parsing LIAFA...")
    icon = 'liafa'
    people = {}
    base = 'http://www.liafa.univ-paris-diderot.fr/'
    tr_sel = 'blockquote > table tr.fondgristresc'

    souper = soup_url(urljoin(base, '/web9/membreliafa/listalpha_fr.php'))

    for tr in souper.select(tr_sel):
        links = tr.select('td a')
        if not links:
            continue
        u = links[0].get('href')
        if u is None:
            continue
        p = {}
        tds = tr.select('td.texte')
        if len(tds) >= 2:
            p['info'] = ''
            office = text(tds[1])
            phone = text(tds[0])
            if office and phone and (office != '-' or phone != '-'):
                p['info'] = 'Office ' + office + ', phone: ' + fmt_phone(phone)

        souper = soup_url(urljoin(base, u))
        # Fall back to the directory page when no personal homepage is listed,
        # so the key lookup below never hits a missing 'url'.
        p['url'] = urljoin(base, u)
        pp = souper.select('table.texte li a.bleu')
        if pp:
            p['url'] = urljoin(base, pp[0].get('href'))
        p['name'] = fmt_name(text(souper.select('blockquote h2')[0]))
        p['icon'] = icon
        p['fuzzy'] = mk_fuzzy(p)

        people[mk_people_key('liafa', p['url'])] = p

    return people
def parse_pps():
    """ Return a dict of people from PPS """
    print "parsing PPS..."
    icon = 'pps'
    people = {}
    base = 'http://www.pps.univ-paris-diderot.fr'
    souper = soup_url(base + '/membres')
    trs = souper.select('#contenu2 table')[0].find_all('tr')[1:]
    for tr in trs:
        link = tr.find('a')
        if not link:
            continue
        p = {}
        p['url'] = urljoin(base, link.get('href'))
        p['name'] = fmt_name(text(link))
        p['fuzzy'] = mk_fuzzy(p)
        p['icon'] = icon
        tds = tr.find_all('td')
        if len(tds) >= 4:
            p['info'] = ''
            office = text(tds[2])
            phone = text(tds[3])
            if office and phone and (office != '-' or phone != '-'):
                p['info'] = 'Office ' + office \
                    + ', phone: ' + fmt_phone('01 45 27 ' + phone)
        people[mk_people_key('pps', p['url'])] = p

    print "parsing PPS (pi.r2)..."
    souper = soup_url(base + '/pi.r2/Members')
    lis = souper.select('.members')[0].find_all('li')
    for li in lis:
        link = li.find('a')
        name = li.find('strong')
        if not link or not name:
            continue
        p = {}
        p['url'] = link.get('href')
        p['name'] = fmt_name(text(name))
        p['fuzzy'] = mk_fuzzy(p)
        p['icon'] = icon
        p['info'] = ''
        # we keep the same key to avoid potential duplicates
        key = mk_people_key('pps', p['url'])
        if key in people:
            print "%s is already at PPS, skipping" % p['name']
            continue
        people[key] = p
    return people
Python
def parse_gallium():
    """ Return a dict of people from Gallium.

    Only some of them teach at Paris Diderot.
    """
    print "parsing Gallium..."
    icon = 'inria'
    people = {}
    base = 'http://gallium.inria.fr'
    souper = soup_url(base + '/members.html')
    links = souper.select('#columnA_2columns a')
    for link in links:
        p = {'name': text(link), 'url': urljoin(base, link.get('href'))}
        p['icon'] = icon
        p['fuzzy'] = mk_fuzzy(p)
        people[mk_people_key('gallium', p['url'])] = p
    return people
Python
def parse_others():
    """ Return a dict of manually-added people """
    return {}
Python
def save_list():
    """ Save the list of people, as a JSON hash. """
    redis.set('people.json', json.dumps(parse_all()))
Python
def read_JSON_data(JSON_filename):
    """Import and read JSON file to determine speed of sound, sampling
    frequency, axial samples, beam spacing, and number of beams

    :param JSON_filename: user-specified JSON file with ultrasound image data
    :return: c: speed of sound
    :return: fs: sampling frequency in Hz
    :return: axial_samples: number of samples in the axial direction
    :return: beam_spacing: distance between beams in meters
    :return: num_beams: number of beams
    """
    try:
        with open(JSON_filename, 'r') as f:
            JSON_dict = json.load(f)
        logging.debug("JSON file opened")
    except FileNotFoundError:
        print("File was not found")
        logging.error("File was not found")
        raise
    try:
        c = float(JSON_dict.get("c"))
        fs = float(JSON_dict.get("fs"))
        axial_samples = int(JSON_dict.get("axial_samples"))
        beam_spacing = float(JSON_dict.get("beam_spacing"))
        num_beams = int(JSON_dict.get("num_beams"))
        logging.info("speed of sound (meters/sec) = " + str(c))
        logging.info("sampling frequency = " + str(fs))
        logging.info("number of axial samples = " + str(axial_samples))
        logging.info("beam spacing (in meters) = " + str(beam_spacing))
        logging.info("number of beams = " + str(num_beams))
    except TypeError:
        print('One of the expected values was not found')
        logging.error("One of the expected values is missing")
        raise
    except ValueError:
        print('One of the expected values is not listed as a number')
        logging.error("One of the expected values is not listed as a number")
        raise
    return c, fs, axial_samples, beam_spacing, num_beams
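A minimal round-trip sketch for read_JSON_data; the file name and parameter values below are invented for illustration.

# hypothetical input file for read_JSON_data (values are invented)
import json

demo = {"c": 1540, "fs": 40e6, "axial_samples": 1556,
        "beam_spacing": 0.000118, "num_beams": 256}
with open("demo_bmode.json", "w") as f:
    json.dump(demo, f)

c, fs, axial_samples, beam_spacing, num_beams = \
    read_JSON_data("demo_bmode.json")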
Python
def RF_bars(rf_data, num_beams):
    """Split radio frequency array into an array of arrays of correct size

    :param rf_data: imported radio frequency data as a single array of
                    integers
    :param num_beams: number of beams
    :return: rfdata_bars: 2D array of RF data of size
             (number of beams by number of axial samples)
    """
    rfdata_bars = np.split(rf_data, num_beams)
    logging.debug("RF_data has been split into rows")
    return rfdata_bars
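A synthetic check of the reshaping RF_bars performs: np.split cuts the flat sample vector into one array per beam.

# synthetic check: 8 samples split across 4 beams
import numpy as np

bars = RF_bars(np.arange(8), num_beams=4)
# bars == [array([0, 1]), array([2, 3]), array([4, 5]), array([6, 7])]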
Python
def image_plot(B_mode_array, beam_spacing, axial_samples, num_beams, f_s, c):
    """Calculate lateral distance and depth of image in meters using data
    from the JSON file and display the image using matplotlib

    :param B_mode_array: completed 2D numpy array of B-mode data that has
                         undergone all image processing
    :param beam_spacing: distance between beams in meters
    :param axial_samples: number of samples in the axial direction
    :param num_beams: number of beams
    :param f_s: sampling frequency in Hz
    :param c: speed of sound
    :return: None
    """
    lateral_distance = beam_spacing * num_beams
    depth_distance = c * axial_samples / f_s / 2
    extent_array = [0, lateral_distance, depth_distance, 0]
    logging.info("Total lateral distance of image is "
                 + str(lateral_distance) + " meters")
    logging.info("Total depth of image is " + str(depth_distance) + " meters")
    plt.imshow(B_mode_array, aspect='auto', extent=extent_array,
               cmap='Greys_r')
    plt.title('B-mode Ultrasound Image')
    # the extent maps lateral distance to x and depth to y,
    # so the axes are labeled accordingly
    plt.xlabel('Lateral Distance (m)')
    plt.ylabel('Depth (m)')
    logging.debug("B-mode Ultrasound Image is plotted using matplotlib")
    plt.show()
Python
def image_save(image_filename, B_mode_array, beam_spacing, axial_samples,
               num_beams, f_s, c):
    """Save B-mode image under specified file name

    :param image_filename: user-specified filename to save B-mode image to
    :param B_mode_array: completed 2D numpy array of B-mode data that has
                         undergone all image processing
    :param beam_spacing: distance between beams in meters
    :param axial_samples: number of samples in the axial direction
    :param num_beams: number of beams
    :param f_s: sampling frequency in Hz
    :param c: speed of sound
    :return: None
    """
    try:
        lateral_distance = beam_spacing * num_beams
        depth_distance = c * axial_samples / f_s / 2
        extent_array = [0, lateral_distance, depth_distance, 0]
        plt.imshow(B_mode_array, aspect='auto', extent=extent_array,
                   cmap='Greys_r')
        plt.title('B-mode Ultrasound Image')
        plt.xlabel('Lateral Distance (m)')
        plt.ylabel('Depth (m)')
        plt.savefig(image_filename)
        logging.debug("Image is saved under the filename: " + image_filename)
    except IOError:
        print('There is no more space, please delete something'
              ' on your hard drive')
        logging.error("There is no space left on the hard drive")
        raise
    except ValueError:
        print('You used an unsupported file format. eps, jpeg,'
              ' jpg, pdf, pgf, png, ps, raw, rgba,'
              ' svg, svgz, tif, tiff are supported')
        logging.error("Attempted and failed to save using an"
                      " unsupported filetype")
        raise
Python
def array_filtering(rfdata_bars):
    """Run envelope detection on individual beams using a lowess filter
    on a 2D numpy array of RF data

    :param rfdata_bars: 2D array of RF data
    :return: RF_array_filtered: 2D numpy array of data that has undergone
             envelope detection
    """
    RF_array_abs = np.absolute(rfdata_bars)
    RF_array_T = np.transpose(RF_array_abs)
    RF_array_filtered = np.empty([len(RF_array_T), len(RF_array_T[0])])
    x_data = np.array(range(len(RF_array_T)))
    for i in range(len(RF_array_T[0])):
        filtered = lowess(RF_array_T[:, i], x_data, frac=0.05)
        RF_array_filtered[:, i] = filtered[:, 1]
    logging.debug("RF_data has undergone envelope detection")
    return RF_array_filtered
Python
def logarithmic_compression(RF_array_filtered):
    """Run logarithmic compression on a 2D numpy array of RF data

    :param RF_array_filtered: 2D numpy array of data that has undergone
                              envelope detection
    :return: log_RFarray_filtered: 2D numpy array of data that has undergone
             logarithmic compression and envelope detection
    """
    log_RFarray_filtered = np.empty([len(RF_array_filtered),
                                     len(RF_array_filtered[0])])
    for i in range(len(RF_array_filtered[0])):
        log_RFarray_filtered[:, i] = np.log10(RF_array_filtered[:, i])
    logging.debug("RF_data has undergone logarithmic compression")
    return log_RFarray_filtered
Python
def equalization(log_RFarray_filtered):
    """Run histogram equalization on a 2D array of RF data that has
    undergone envelope detection and logarithmic compression

    :param log_RFarray_filtered: 2D numpy array of data that has undergone
                                 logarithmic compression and envelope
                                 detection
    :return: B_mode_array: completed 2D numpy array of B-mode data that has
             undergone all image processing
    """
    B_mode_array = np.empty([len(log_RFarray_filtered),
                             len(log_RFarray_filtered[0])])
    # equalize every beam (column) of the array
    for i in range(len(log_RFarray_filtered[0])):
        # pad leading/trailing NaNs with the nearest valid value
        # before equalizing
        ind = np.where(~np.isnan(log_RFarray_filtered[:, i]))[0]
        first, last = ind[0], ind[-1]
        log_RFarray_filtered[:, i][:first] = \
            log_RFarray_filtered[:, i][first]
        log_RFarray_filtered[:, i][last + 1:] = \
            log_RFarray_filtered[:, i][last]
        B_mode_array[:, i] = \
            exposure.equalize_hist(log_RFarray_filtered[:, i])
    logging.debug("RF_data has undergone histogram equalization")
    return B_mode_array
Python
def doubletons(self) -> int:
    """ Return the number of doubletons contained in this hand """
    return sum(1 for tons in self.dict_rep.values() if len(tons) == 2)
Python
def singletons(self) -> int:
    """ Return the number of singletons contained in this hand """
    return sum(1 for tons in self.dict_rep.values() if len(tons) == 1)
Python
def voids(self) -> int:
    """ Return the number of voids (missing suits) contained in this hand """
    return 4 - len(self.dict_rep)
Python
def ssp(self) -> int:
    """ Return the number of short suit points in this hand.

    Doubletons are worth one point, singletons two points, voids three
    points.
    """
    output_dict = dict()
    for suit in 'SHDC':
        output_dict[suit] = len([i.rank.name for i in self.cards_sorted
                                 if i.suit.name == suit])
    ssp_total = 0
    for value in output_dict.values():
        if 0 <= value <= 2:
            ssp_total += SSP[value]
    return ssp_total
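ssp relies on a module-level SSP lookup that is not shown in this snippet; a plausible definition consistent with the docstring (doubleton = 1, singleton = 2, void = 3) could be:

# assumed SSP lookup (not in the source): suit length -> points
SSP = {0: 3,  # void
       1: 2,  # singleton
       2: 1}  # doubleton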
Python
def ltc(self) -> int:
    """ Return the losing trick count for this hand - see bite description
    for the procedure
    """
    ltc = 0
    for suit in 'SHDC':
        # a suit missing from dict_rep is a void and contributes no losers
        check = self.dict_rep.get(suit, '')[:3]
        if len(check) == 0:
            continue
        elif len(check) == 1:
            if check != 'A':
                ltc += 1
        elif len(check) == 2:
            if check == 'AK':
                continue
            elif 'A' in check or 'K' in check:
                ltc += 1
            else:
                ltc += 2
        else:
            if check == 'AKQ':
                continue
            elif 'AK' in check or 'AQ' in check or 'KQ' in check:
                ltc += 1
            elif 'A' in check or 'K' in check or 'Q' in check:
                ltc += 2
            else:
                ltc += 3
    return ltc
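A worked sketch of the losing trick count on an invented hand, using a stand-in class so the method above can run in isolation.

# hypothetical worked example for ltc (hand and class are invented)
class _DemoHand:
    ltc = ltc  # reuse the function above as a method
    dict_rep = {'S': 'AKQ32', 'H': 'K54', 'D': 'Q2', 'C': '7'}

# S 'AKQ' -> 0 losers, H 'K54' -> 2, D 'Q2' -> 2, C '7' -> 1
assert _DemoHand().ltc() == 5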
Python
def load_excel_into_dataframe(excel=EXCEL):
    """Load the SalesOrders sheet of the excel book (EXCEL variable)
    into a Pandas DataFrame and return it to the caller"""
    return pd.read_excel(excel, sheet_name='SalesOrders')
Python
def calculate_gc_content(sequence):
    """
    Receives a DNA sequence (A, G, C, or T)
    Returns the percentage of GC content (rounded to two digits)
    """
    count = Counter(sequence.lower())
    gc = count['g'] + count['c']
    return round(gc / (gc + count['t'] + count['a']) * 100, 2)
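A few quick checks of calculate_gc_content with made-up sequences:

# sanity checks (sequences are invented)
assert calculate_gc_content("GCGC") == 100.0
assert calculate_gc_content("ATGC") == 50.0
assert calculate_gc_content("attgcgcta") == 44.44  # 4 of 9 bases are G/C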
Python
def _get_pycons():
    """Helper function that retrieves required PyCon data
    and returns a list of PyCon objects
    """
    with open(pycons_file, "r", encoding="utf-8") as f:
        return [
            PyCon(
                pycon["name"],
                pycon["city"],
                pycon["country"],
                parse(pycon["start_date"]),
                parse(pycon["end_date"]),
                pycon["url"],
            )
            for pycon in json.load(f)
        ]
Python
def _km_distance(origin, destination):
    """
    Helper function that retrieves the air distance in kilometers
    for two pycons
    """
    lon1, lat1, lon2, lat2 = map(
        radians, [origin.lon, origin.lat, destination.lon, destination.lat]
    )
    return 6371 * (
        acos(sin(lat1) * sin(lat2) + cos(lat1) * cos(lat2) * cos(lon1 - lon2))
    )
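A rough sanity check of _km_distance with two hypothetical pycon-like objects; the coordinates are approximate city centres, so expect a value around 310 km.

# rough check (objects and coordinates are invented approximations)
from math import acos, cos, radians, sin
from types import SimpleNamespace

cleveland = SimpleNamespace(lat=41.4995, lon=-81.6954)
toronto = SimpleNamespace(lat=43.6535, lon=-79.3839)
print(_km_distance(cleveland, toronto))  # ~308 km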
Python
def update_pycons_lat_lon(pycons):
    """
    Update the latitudes and longitudes based on the city and country
    the PyCon takes place in. Uses responses from the Nominatim API stored
    in the nominatim_responses json file.
    """
    with open(nominatim_responses, "r", encoding="utf-8") as f:
        place_data = json.load(f)
    for place in pycons:
        key = ('https://nominatim.openstreetmap.org/search?q='
               + place.city + "," + place.country
               + '&format=json&accept-language=en')
        matching_nominatim_data = place_data[key]
        place.lat = float(matching_nominatim_data[0]['lat'])
        place.lon = float(matching_nominatim_data[0]['lon'])
    return pycons
Python
def create_travel_plan(pycons):
    """
    Create your travel plan to visit all the PyCons.
    Assume it's now the start of 2019!
    Return a list of Trips with each Trip containing the origin PyCon,
    the destination PyCon and the travel distance between the PyCons.
    """
    sorted_pycons = sorted(pycons, key=lambda x: x.start_date)
    trip_list = []
    for i in range(len(sorted_pycons) - 1):
        trip = Trip(sorted_pycons[i], sorted_pycons[i + 1],
                    _km_distance(sorted_pycons[i], sorted_pycons[i + 1]))
        trip_list.append(trip)
    return trip_list
Python
def total_travel_distance(journey):
    """
    Return the total travel distance of your PyCon journey in kilometers
    rounded to one decimal.
    """
    return round(sum(trip.distance for trip in journey), 1)
Python
def common_words(sentence1: List[str], sentence2: List[str]) -> List[str]:
    """
    Input: two sentences - each is a list of words, case insensitive.
    Output: the common words that appear in both sentences. Capital and
    lowercase words are treated as the same word. If there are duplicate
    words in the results, just choose one word.
    Returned words should be sorted by word length.
    """
    words1 = {word.lower() for word in sentence1}
    words2 = {word.lower() for word in sentence2}
    return sorted(words1 & words2, key=len)
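An example run of common_words; words of equal length come out in arbitrary set order.

# example run (sentences are invented)
s1 = ["The", "quick", "brown", "FOX"]
s2 = ["a", "fox", "jumped", "over", "the", "dog"]
print(common_words(s1, s2))  # ['the', 'fox'] in some order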
Python
def create_parser():
    """TODO 2:
    Create an ArgumentParser object:
    - have one operation argument,
    - have one or more integers that can be operated on.
    Returns an argparse.ArgumentParser object.

    Note that type=float times out here so do the casting in the
    calculator function above!"""
    parser = argparse.ArgumentParser('A simple calculator')
    parser.add_argument('-a', '--add', type=float, nargs='+')
    parser.add_argument('-s', '--sub', type=float, nargs='+')
    parser.add_argument('-m', '--mul', type=float, nargs='+')
    parser.add_argument('-d', '--div', type=float, nargs='+')
    return parser
Python
def remove_punctuation(input_string):
    """Return a str with punctuation chars stripped out"""
    tran_table = str.maketrans('', '', '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~')
    return input_string.translate(tran_table)
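An example run of remove_punctuation:

# example run
print(remove_punctuation("Hello, world! (test)"))  # -> Hello world test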
Python
def group_cars_by_manufacturer(cars):
    """Iterate through the list of (manufacturer, model) tuples of the cars
    list defined above and generate the output as described in the Bite
    description (see the tests for the full output).

    No return here, just print to the console.
    We use pytest > capfd to validate your output :)
    """
    car_dict = defaultdict(list)
    for key, group in groupby(cars, lambda x: x[0]):
        car_dict[key] += group
    # sort each manufacturer's models in place
    for models in car_dict.values():
        models.sort()
    alphabetized_cars = sorted(car_dict.items())
    final_brand = sorted(car_dict.keys())[-1]
    for key, value in alphabetized_cars:
        print(key.upper())
        for i in value:
            print(f'- {i[1]}')
        if key != final_brand:
            print()
Python
def cached_property(func):
    """decorator used to cache expensive object attribute lookup"""
    name = f'_{func.__name__}'

    @wraps(func)
    def checker(self, *args):
        # compute and store the value on first access only
        if getattr(self, name, None) is None:
            setattr(self, name, func(self, *args))
        return getattr(self, name)

    return property(checker)
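A small usage sketch for cached_property; the Report class and its expensive computation are invented.

# hypothetical usage of cached_property (class is invented)
class Report:
    def __init__(self, rows):
        self.rows = rows

    @cached_property
    def total(self):
        print("computing...")  # runs only on first access
        return sum(self.rows)

r = Report([1, 2, 3])
r.total  # prints "computing...", returns 6
r.total  # served from the cached _total attribute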
Python
def make_character_index(text=text, characters=CHARACTERS):
    """Return a dict with characters (lowercased) as keys and the lines
    they appear in, in sorted order, as values.

    Matches should be case insensitive.
    If a character has multiple synonyms - e.g. ('Grandmother', 'Grandma',
    'Granny') - then return the former as key.
    """
    index_dict = defaultdict(list)
    stripped = text.lower().translate(
        str.maketrans('', '', string.punctuation))
    lines = stripped.splitlines()
    for character in characters:
        if isinstance(character, tuple):
            # use the first synonym as the dict key
            key = character[0].lower()
            for i, line in enumerate(lines):
                if any(synonym.lower() in line for synonym in character):
                    index_dict[key].append(i)
        else:
            for i, line in enumerate(lines):
                if character.lower() in line:
                    index_dict[character.lower()].append(i)
    return index_dict
Python
def spinner(seconds):
    """Make a terminal loader/spinner animation using the imports above.

    Takes a seconds argument = time for the spinner to run.
    Does not return anything, only prints to stdout."""
    spinner_cycle = cycle(SPINNER_STATES)
    for _ in range(round(seconds / STATE_TRANSITION_TIME)):
        sys.stdout.write(next(spinner_cycle))
        sys.stdout.flush()
        sleep(STATE_TRANSITION_TIME)
        sys.stdout.write('\r')
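An example invocation; SPINNER_STATES and STATE_TRANSITION_TIME are assumed module-level constants, so the values below are only illustrative.

# illustrative constants (assumed, not from the source)
SPINNER_STATES = '|/-\\'
STATE_TRANSITION_TIME = 0.2

spinner(2)  # animates for roughly 2 seconds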
Python
def data(self) -> Optional[str]:
    """Reads the data from the File object.

    First it checks if the File object has any data. If it doesn't, it
    retrieves it and saves it to the File. It then reads it from the File
    and returns it.

    Returns:
        Optional[str] -- The string data from the File object.
    """
    try:
        with open(self.file) as f:
            return f.read()
    except OSError:
        # no local copy yet: retrieve the data from the web
        return requests.get(self.file).text
Python
def soup(self) -> Soup:
    """Converts string data from File into a BeautifulSoup object.

    Returns:
        Soup -- BeautifulSoup object created from the File.
    """
    return Soup(self.data, 'html.parser')
Python
def find_table(self, loc: int = 0) -> str:
    """Finds the table elements from the Soup object

    Keyword Arguments:
        loc {int} -- Parses the Web object for table elements and
                     returns the first one that it finds unless an integer
                     representing the required table is passed.
                     (default: {0})

    Returns:
        str -- The html table
    """
    table = self.soup.find_all('table')
    return table[loc]
Python
def parse_rows(self, table: Soup) -> List[Any]:
    """Abstract Method

    Parses the row data from the html table.

    Arguments:
        table {Soup} -- Parses a BeautifulSoup table element and
                        returns the text found in the td elements as
                        NamedTuple.

    Returns:
        List[NamedTuple] -- List of NamedTuple that were created from
                            the table data.
    """
    table_list = table.find_all('td')
    tuple_list = []
    for row in table_list:
        candidate = row.find('span', class_='g-desktop').text
        average = row.find('span', class_='g-coverage').text
        delegates = row.find('span', class_='g-coverage').text
        contributions = row.find('span', class_='g-coverage').text
        coverage = row.find('span', class_='g-coverage').text
        tuple_list.append(LeaderBoard(candidate, average, delegates,
                                      contributions, coverage))
    return tuple_list
Python
def polls(self, table: int = 0) -> List[Any]:
    """Abstract Method

    Parses the data

    The find_table and parse_rows methods are called for you and the
    table index that is passed to it is used to get the correct table
    from the soup object.

    Keyword Arguments:
        table {int} -- Does the parsing of the table and rows for you.
                       It takes the table index number if given,
                       otherwise parses table 0. (default: {0})

    Returns:
        List[NamedTuple] -- List of NamedTuple that were created from
                            the table data.
    """
    table_to_use = self.find_table(table)
    return self.parse_rows(table_to_use)
Python
def stats(self, loc: int = 0):
    """Abstract Method

    Produces the stats from the polls.

    Keyword Arguments:
        loc {int} -- Formats the results from polls into a more user
                     friendly representation.
    """
    pass
Python
def parse_rows(self, table: Soup) -> List[Poll]:
    """Parses the row data from the html table.

    Arguments:
        table {Soup} -- Parses a BeautifulSoup table element and returns
                        the text found in the td elements as Poll
                        namedtuples.

    Returns:
        List[Poll] -- List of Poll namedtuples that were created from
                      the table data.
    """
    # sketch: read each table row's cells in source-column order
    # (assumed to be poll, date, sample, Sanders, Biden, Gabbard, spread)
    list_of_tuples = []
    for row in table.find_all('tr'):
        cells = [td.get_text(strip=True) for td in row.find_all('td')]
        if len(cells) < 7:
            continue
        poll, date, sample, sanders, biden, gabbard, spread = cells[:7]
        list_of_tuples.append(
            Poll(poll, date, sample, sanders, biden, gabbard, spread))
    return list_of_tuples
Python
def polls(self, table: int = 0) -> List[Poll]:
    """Parses the data

    The find_table and parse_rows methods are called for you and the
    table index that is passed to it is used to get the correct table
    from the soup object.

    Keyword Arguments:
        table {int} -- Does the parsing of the table and rows for you.
                       It takes the table index number if given,
                       otherwise parses table 0. (default: {0})

    Returns:
        List[Poll] -- List of Poll namedtuples that were created from
                      the table data.
    """
    table_for_parsing = self.find_table(table)
    return self.parse_rows(table_for_parsing)
Python
def parse_rows(self, table: Soup) -> List[LeaderBoard]:
    """Parses the row data from the html table.

    Arguments:
        table {Soup} -- Parses a BeautifulSoup table element and returns
                        the text found in the td elements as LeaderBoard
                        namedtuples.

    Returns:
        List[LeaderBoard] -- List of LeaderBoard namedtuples that were
                             created from the table data.
    """
    table_list = table.find_all('td')
    tuple_list = []
    for row in table_list:
        candidate = row.find('span', class_='g-desktop').text
        average = row.find('span', class_='g-coverage').text
        delegates = row.find('span', class_='g-coverage').text
        contributions = row.find('span', class_='g-coverage').text
        coverage = row.find('span', class_='g-coverage').text
        tuple_list.append(LeaderBoard(candidate, average, delegates,
                                      contributions, coverage))
    return tuple_list
Python
def polls(self, table: int = 0) -> List[LeaderBoard]:
    """Parses the data

    The find_table and parse_rows methods are called for you and the
    table index that is passed to it is used to get the correct table
    from the soup object.

    Keyword Arguments:
        table {int} -- Does the parsing of the table and rows for you.
                       It takes the table index number if given,
                       otherwise parses table 0. (default: {0})

    Returns:
        List[LeaderBoard] -- List of LeaderBoard namedtuples that were
                             created from the table data.
    """
    table_for_parsing = self.find_table(table)
    return self.parse_rows(table_for_parsing)
Python
def _preload_sequences(url=URL):
    """
    Provided helper function
    Returns coding sequences, one sequence each line
    """
    filename = os.path.join(os.getenv("TMP", "/tmp"), "NC_009641.txt")
    if not os.path.isfile(filename):
        urlretrieve(url, filename)
    with open(filename, "r") as f:
        return f.readlines()