| sentence1 (string, lengths 52 to 3.87M) | sentence2 (string, lengths 1 to 47.2k) | label (string, 1 class) |
---|---|---|
def get_catalog(self, locale):
"""Create Django translation catalogue for `locale`."""
with translation.override(locale):
translation_engine = DjangoTranslation(locale, domain=self.domain, localedirs=self.paths)
trans_cat = translation_engine._catalog
trans_fallback_cat = translation_engine._fallback._catalog if translation_engine._fallback else {}
return trans_cat, trans_fallback_cat | Create Django translation catalogue for `locale`. | entailment |
def get_paths(cls, packages):
"""Create list of matching packages for translation engine."""
allowable_packages = dict((app_config.name, app_config) for app_config in apps.get_app_configs())
app_configs = [allowable_packages[p] for p in packages if p in allowable_packages]
# paths of requested packages
return [os.path.join(app.path, 'locale') for app in app_configs] | Create list of matching packages for translation engine. | entailment |
def get_catalogue_header_value(cls, catalog, key):
"""Get `.po` header value."""
header_value = None
if '' in catalog:
for line in catalog[''].split('\n'):
if line.startswith('%s:' % key):
header_value = line.split(':', 1)[1].strip()
return header_value | Get `.po` header value. | entailment |
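For reference, a minimal self-contained sketch of what that header lookup does; the catalog contents below are invented for illustration (a gettext catalog stores the PO header under the empty msgid as newline-separated "Key: value" lines).
catalog = {'': 'Project-Id-Version: demo 1.0\nLanguage: de\nMIME-Version: 1.0\n'}
key = 'Language'
header_value = None
for line in catalog[''].split('\n'):
    if line.startswith('%s:' % key):
        header_value = line.split(':', 1)[1].strip()
print(header_value)  # 'de'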
def _num_plurals(self, catalogue):
"""
Return the number of plurals for this catalog language, or 2 if no
plural string is available.
"""
match = re.search(r'nplurals=\s*(\d+)', self.get_plural(catalogue) or '')
if match:
return int(match.groups()[0])
return 2 | Return the number of plurals for this catalog language, or 2 if no
plural string is available. | entailment |
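For context, a small sketch of the regex above applied to a typical gettext Plural-Forms value (the header string is illustrative, not taken from the source):
import re

plural_forms = 'nplurals=2; plural=(n != 1);'
match = re.search(r'nplurals=\s*(\d+)', plural_forms)
num_plurals = int(match.groups()[0]) if match else 2  # falls back to 2, as above
print(num_plurals)  # 2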
def make_header(self, locale, catalog):
"""Populate header with correct data from top-most locale file."""
return {
"po-revision-date": self.get_catalogue_header_value(catalog, 'PO-Revision-Date'),
"mime-version": self.get_catalogue_header_value(catalog, 'MIME-Version'),
"last-translator": 'Automatic <[email protected]>',
"x-generator": "Python",
"language": self.get_catalogue_header_value(catalog, 'Language') or locale,
"lang": locale,
"content-transfer-encoding": self.get_catalogue_header_value(catalog, 'Content-Transfer-Encoding'),
"project-id-version": self.get_catalogue_header_value(catalog, 'Project-Id-Version'),
"pot-creation-date": self.get_catalogue_header_value(catalog, 'POT-Creation-Date'),
"domain": self.domain,
"report-msgid-bugs-to": self.get_catalogue_header_value(catalog, 'Report-Msgid-Bugs-To'),
"content-type": self.get_catalogue_header_value(catalog, 'Content-Type'),
"plural-forms": self.get_plural(catalog),
"language-team": self.get_catalogue_header_value(catalog, 'Language-Team')
} | Populate header with correct data from top-most locale file. | entailment |
def collect_translations(self):
"""Collect all `domain` translations and return `Tuple[languages, locale_data]`"""
languages = {}
locale_data = {}
for language_code, label in settings.LANGUAGES:
languages[language_code] = '%s' % label
# Create django translation engine for `language_code`
trans_cat, trans_fallback_cat = self.get_catalog(language_code)
# Add the meta object
locale_data[language_code] = {}
locale_data[language_code][""] = self.make_header(language_code, trans_cat)
num_plurals = self._num_plurals(trans_cat)
# Next code is largely taken from Django@master (01.10.2017) from `django.views.i18n JavaScriptCatalogue`
pdict = {}
seen_keys = set()
for key, value in itertools.chain(six.iteritems(trans_cat), six.iteritems(trans_fallback_cat)):
if key == '' or key in seen_keys:
continue
if isinstance(key, six.string_types):
locale_data[language_code][key] = [value]
elif isinstance(key, tuple):
msgid, cnt = key
pdict.setdefault(msgid, {})[cnt] = value
else:
raise TypeError(key)
seen_keys.add(key)
for k, v in pdict.items():
locale_data[language_code][k] = [v.get(i, '') for i in range(num_plurals)]
for key, value in locale_data.items():
locale_data[key] = json.dumps(value)
return languages, locale_data | Collect all `domain` translations and return `Tuple[languages, locale_data]` | entailment |
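As a rough illustration of the plural handling in that loop, here is a self-contained sketch using an invented toy catalog; plural entries are keyed by (msgid, plural_index) tuples, exactly the case the tuple branch above handles.
trans_cat = {
    '': 'Language: de\n',
    'apple': 'Apfel',
    ('%d apple', 0): '%d Apfel',
    ('%d apple', 1): '%d Aepfel',
}
num_plurals = 2
locale_entry = {}
pdict = {}
for key, value in trans_cat.items():
    if key == '':
        continue
    if isinstance(key, str):
        locale_entry[key] = [value]
    else:
        msgid, cnt = key
        pdict.setdefault(msgid, {})[cnt] = value
for k, v in pdict.items():
    locale_entry[k] = [v.get(i, '') for i in range(num_plurals)]
print(locale_entry)  # {'apple': ['Apfel'], '%d apple': ['%d Apfel', '%d Aepfel']}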
def get_endpoint_obj(client, endpoint, object_id):
''' Tiny helper function that gets used all over the place to join the object ID to the endpoint and run a GET request, returning the result '''
endpoint = '/'.join([endpoint, str(object_id)])
return client.authenticated_request(endpoint).json() | Tiny helper function that gets used all over the place to join the object ID to the endpoint and run a GET request, returning the result | entailment |
def update_endpoint_obj(client, endpoint, object_id, revision, data):
'''
Helper method to ease the repetitiveness of updating an... SO VERY DRY
(That's a doubly-effective pun because my predecessor - https://github.com/bsmt/wunderpy - found maintaining a Python Wunderlist API to be "as tedious and boring as a liberal arts school poetry slam")
'''
data['revision'] = int(revision)
endpoint = '/'.join([endpoint, str(object_id)])
return client.authenticated_request(endpoint, 'PATCH', data=data).json() | Helper method to ease the repetitiveness of updating an... SO VERY DRY
(That's a doubly-effective pun because my predecessor - https://github.com/bsmt/wunderpy - found maintaining a Python Wunderlist API to be "as tedious and boring as a liberal arts school poetry slam") | entailment |
def _validate_response(self, method, response):
''' Helper method to validate that the response to a Wunderlist API request is as expected '''
# TODO Fill this out using the error codes here: https://developer.wunderlist.com/documentation/concepts/formats
# The expected results can change based on API version, so validate this here
if self.api_version:
if response.status_code >= 400:
raise ValueError('{} {}'.format(response.status_code, str(response.json())))
if method == 'GET':
assert response.status_code == 200
elif method == 'POST':
assert response.status_code == 201
elif method == 'PATCH':
assert response.status_code == 200
elif method == 'DELETE':
assert response.status_code == 204 | Helper method to validate that the response to a Wunderlist API request is as expected | entailment |
def request(self, endpoint, method='GET', headers=None, params=None, data=None):
'''
Send a request to the given Wunderlist API endpoint
Params:
endpoint -- API endpoint to send request to
Keyword Args:
headers -- headers to add to the request
method -- GET, PUT, PATCH, DELETE, etc.
params -- parameters to encode in the request
data -- data to send with the request
'''
if not headers:
headers = {}
if method in ['POST', 'PATCH', 'PUT']:
headers['Content-Type'] = 'application/json'
url = '/'.join([self.api_url, 'v' + self.api_version, endpoint])
data = json.dumps(data) if data else None
try:
response = requests.request(method=method, url=url, params=params, headers=headers, data=data)
# TODO Does recreating the exception classes 'requests' use suck? Yes, but it sucks more to expose the underlying library I use
except requests.exceptions.Timeout as e:
raise wp_exceptions.TimeoutError(e)
except requests.exceptions.ConnectionError as e:
raise wp_exceptions.ConnectionError(e)
self._validate_response(method, response)
return response | Send a request to the given Wunderlist API endpoint
Params:
endpoint -- API endpoint to send request to
Keyword Args:
headers -- headers to add to the request
method -- GET, PUT, PATCH, DELETE, etc.
params -- parameters to encode in the request
data -- data to send with the request | entailment |
def get_access_token(self, code, client_id, client_secret):
'''
Exchange a temporary code for an access token allowing access to a user's account
See https://developer.wunderlist.com/documentation/concepts/authorization for more info
'''
headers = {
'Content-Type' : 'application/json'
}
data = {
'client_id' : client_id,
'client_secret' : client_secret,
'code' : code,
}
str_data = json.dumps(data)
response = requests.request(method='POST', url=ACCESS_TOKEN_URL, headers=headers, data=str_data)
status_code = response.status_code
if status_code != 200:
raise ValueError("{} -- {}".format(status_code, response.json()))
return response.json()['access_token'] | Exchange a temporary code for an access token allowing access to a user's account
See https://developer.wunderlist.com/documentation/concepts/authorization for more info | entailment |
def authenticated_request(self, endpoint, method='GET', params=None, data=None):
'''
Send a request to the given Wunderlist API with 'X-Access-Token' and 'X-Client-ID' headers and ensure the response code is as expected given the request type
Params:
endpoint -- API endpoint to send request to
Keyword Args:
method -- GET, PUT, PATCH, DELETE, etc.
params -- parameters to encode in the request
data -- data to send with the request
'''
headers = {
'X-Access-Token' : self.access_token,
'X-Client-ID' : self.client_id
}
return self.api.request(endpoint, method=method, headers=headers, params=params, data=data) | Send a request to the given Wunderlist API with 'X-Access-Token' and 'X-Client-ID' headers and ensure the response code is as expected given the request type
Params:
endpoint -- API endpoint to send request to
Keyword Args:
method -- GET, PUT, PATCH, DELETE, etc.
params -- parameters to encode in the request
data -- data to send with the request | entailment |
def update_list(self, list_id, revision, title=None, public=None):
''' Updates the list with the given ID to have the given title and public flag '''
return lists_endpoint.update_list(self, list_id, revision, title=title, public=public) | Updates the list with the given ID to have the given title and public flag | entailment |
def get_tasks(self, list_id, completed=False):
''' Gets tasks for the list with the given ID, filtered by the given completion flag '''
return tasks_endpoint.get_tasks(self, list_id, completed=completed) | Gets tasks for the list with the given ID, filtered by the given completion flag | entailment |
def create_task(self, list_id, title, assignee_id=None, completed=None, recurrence_type=None, recurrence_count=None, due_date=None, starred=None):
''' Creates a new task with the given information in the list with the given ID '''
return tasks_endpoint.create_task(self, list_id, title, assignee_id=assignee_id, completed=completed, recurrence_type=recurrence_type, recurrence_count=recurrence_count, due_date=due_date, starred=starred) | Creates a new task with the given information in the list with the given ID | entailment |
def update_task(self, task_id, revision, title=None, assignee_id=None, completed=None, recurrence_type=None, recurrence_count=None, due_date=None, starred=None, remove=None):
'''
Updates the task with the given ID to have the given information
NOTE: The 'remove' parameter is an optional list of parameters to remove from the given task, e.g. ['due_date']
'''
return tasks_endpoint.update_task(self, task_id, revision, title=title, assignee_id=assignee_id, completed=completed, recurrence_type=recurrence_type, recurrence_count=recurrence_count, due_date=due_date, starred=starred, remove=remove) | Updates the task with the given ID to have the given information
NOTE: The 'remove' parameter is an optional list of parameters to remove from the given task, e.g. ['due_date'] | entailment |
def update_note(self, note_id, revision, content):
''' Updates the note with the given ID to have the given content '''
return notes_endpoint.update_note(self, note_id, revision, content) | Updates the note with the given ID to have the given content | entailment |
def get_task_subtasks(self, task_id, completed=False):
''' Gets subtasks for task with given ID '''
return subtasks_endpoint.get_task_subtasks(self, task_id, completed=completed) | Gets subtasks for task with given ID | entailment |
def get_list_subtasks(self, list_id, completed=False):
''' Gets subtasks for the list with given ID '''
return subtasks_endpoint.get_list_subtasks(self, list_id, completed=completed) | Gets subtasks for the list with given ID | entailment |
def create_subtask(self, task_id, title, completed=False):
'''
Creates a subtask with the given title under the task with the given ID
Return:
Newly-created subtask
'''
return subtasks_endpoint.create_subtask(self, task_id, title, completed=completed) | Creates a subtask with the given title under the task with the given ID
Return:
Newly-created subtask | entailment |
def update_subtask(self, subtask_id, revision, title=None, completed=None):
'''
Updates the subtask with the given ID
See https://developer.wunderlist.com/documentation/endpoints/subtask for detailed parameter information
Returns:
Subtask with given ID with properties and revision updated
'''
return subtasks_endpoint.update_subtask(self, subtask_id, revision, title=title, completed=completed) | Updates the subtask with the given ID
See https://developer.wunderlist.com/documentation/endpoints/subtask for detailed parameter information
Returns:
Subtask with given ID with properties and revision updated | entailment |
def update_list_positions_obj(self, positions_obj_id, revision, values):
'''
Updates the ordering of lists to have the given value. The given ID and revision should match the singleton object defining how lists are laid out.
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
The updated ListPositionsObj-mapped object defining the order of list layout
'''
return positions_endpoints.update_list_positions_obj(self, positions_obj_id, revision, values) | Updates the ordering of lists to have the given value. The given ID and revision should match the singleton object defining how lists are laid out.
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
The updated ListPositionsObj-mapped object defining the order of list layout | entailment |
def update_task_positions_obj(self, positions_obj_id, revision, values):
'''
Updates the ordering of tasks in the positions object with the given ID to the ordering in the given values.
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
The updated TaskPositionsObj-mapped object defining the order of list layout
'''
return positions_endpoints.update_task_positions_obj(self, positions_obj_id, revision, values) | Updates the ordering of tasks in the positions object with the given ID to the ordering in the given values.
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
The updated TaskPositionsObj-mapped object defining the order of list layout | entailment |
def update_subtask_positions_obj(self, positions_obj_id, revision, values):
'''
Updates the ordering of subtasks in the positions object with the given ID to the ordering in the given values.
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
The updated SubtaskPositionsObj-mapped object defining the order of list layout
'''
return positions_endpoints.update_subtask_positions_obj(self, positions_obj_id, revision, values) | Updates the ordering of subtasks in the positions object with the given ID to the ordering in the given values.
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
The updated SubtaskPositionsObj-mapped object defining the order of list layout | entailment |
def _check_date_format(date, api):
''' Checks that the given date string conforms to the given API's date format specification '''
try:
datetime.datetime.strptime(date, api.DATE_FORMAT)
except ValueError:
raise ValueError("Date '{}' does not conform to API format: {}".format(date, api.DATE_FORMAT)) | Checks that the given date string conforms to the given API's date format specification | entailment |
def get_tasks(client, list_id, completed=False):
''' Gets un/completed tasks for the given list ID '''
params = {
'list_id' : str(list_id),
'completed' : completed
}
response = client.authenticated_request(client.api.Endpoints.TASKS, params=params)
return response.json() | Gets un/completed tasks for the given list ID | entailment |
def get_task(client, task_id):
''' Gets task information for the given ID '''
endpoint = '/'.join([client.api.Endpoints.TASKS, str(task_id)])
response = client.authenticated_request(endpoint)
return response.json() | Gets task information for the given ID | entailment |
def create_task(client, list_id, title, assignee_id=None, completed=None, recurrence_type=None, recurrence_count=None, due_date=None, starred=None):
'''
Creates a task in the given list
See https://developer.wunderlist.com/documentation/endpoints/task for detailed parameter information
'''
_check_title_length(title, client.api)
if (recurrence_type is None and recurrence_count is not None) or (recurrence_type is not None and recurrence_count is None):
raise ValueError("recurrence_type and recurrence_count are required are required together")
if due_date is not None:
_check_date_format(due_date, client.api)
data = {
'list_id' : int(list_id) if list_id else None,
'title' : title,
'assignee_id' : int(assignee_id) if assignee_id else None,
'completed' : completed,
'recurrence_type' : recurrence_type,
'recurrence_count' : int(recurrence_count) if recurrence_count else None,
'due_date' : due_date,
'starred' : starred,
}
data = { key: value for key, value in data.items() if value is not None }
response = client.authenticated_request(client.api.Endpoints.TASKS, 'POST', data=data)
return response.json() | Creates a task in the given list
See https://developer.wunderlist.com/documentation/endpoints/task for detailed parameter information | entailment |
def update_task(client, task_id, revision, title=None, assignee_id=None, completed=None, recurrence_type=None, recurrence_count=None, due_date=None, starred=None, remove=None):
'''
Updates the task with the given ID
See https://developer.wunderlist.com/documentation/endpoints/task for detailed parameter information
'''
if title is not None:
_check_title_length(title, client.api)
if (recurrence_type is None and recurrence_count is not None) or (recurrence_type is not None and recurrence_count is None):
raise ValueError("recurrence_type and recurrence_count are required are required together")
if due_date is not None:
_check_date_format(due_date, client.api)
data = {
'revision' : int(revision),
'title' : title,
'assignee_id' : int(assignee_id) if assignee_id else None,
'completed' : completed,
'recurrence_type' : recurrence_type,
'recurrence_count' : int(recurrence_count) if recurrence_count else None,
'due_date' : due_date,
'starred' : starred,
'remove' : remove,
}
data = { key: value for key, value in data.items() if value is not None }
endpoint = '/'.join([client.api.Endpoints.TASKS, str(task_id)])
response = client.authenticated_request(endpoint, 'PATCH', data=data)
return response.json() | Updates the task with the given ID
See https://developer.wunderlist.com/documentation/endpoints/task for detailed parameter information | entailment |
def _check_title_length(title, api):
''' Checks the given title against the given API specifications to ensure it's short enough '''
if len(title) > api.MAX_LIST_TITLE_LENGTH:
raise ValueError("Title cannot be longer than {} characters".format(api.MAX_TASK_TITLE_LENGTH)) | Checks the given title against the given API specifications to ensure it's short enough | entailment |
def get_lists(client):
''' Gets all the client's lists '''
response = client.authenticated_request(client.api.Endpoints.LISTS)
return response.json() | Gets all the client's lists | entailment |
def get_list(client, list_id):
''' Gets the given list '''
endpoint = '/'.join([client.api.Endpoints.LISTS, str(list_id)])
response = client.authenticated_request(endpoint)
return response.json() | Gets the given list | entailment |
def create_list(client, title):
''' Creates a new list with the given title '''
_check_title_length(title, client.api)
data = {
'title' : title,
}
response = client.authenticated_request(client.api.Endpoints.LISTS, method='POST', data=data)
return response.json() | Creates a new list with the given title | entailment |
def update_list(client, list_id, revision, title=None, public=None):
'''
Updates the list with the given ID to have the given properties
See https://developer.wunderlist.com/documentation/endpoints/list for detailed parameter information
'''
if title is not None:
_check_title_length(title, client.api)
data = {
'revision' : revision,
'title' : title,
'public' : public,
}
data = { key: value for key, value in data.items() if value is not None }
endpoint = '/'.join([client.api.Endpoints.LISTS, str(list_id)])
response = client.authenticated_request(endpoint, 'PATCH', data=data)
return response.json() | Updates the list with the given ID to have the given properties
See https://developer.wunderlist.com/documentation/endpoints/list for detailed parameter information | entailment |
def get_list_positions_obj(client, positions_obj_id):
'''
Gets the object that defines how lists are ordered (there will always be only one of these)
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
A ListPositionsObj-mapped object defining the order of list layout
'''
return endpoint_helpers.get_endpoint_obj(client, client.api.Endpoints.LIST_POSITIONS, positions_obj_id) | Gets the object that defines how lists are ordered (there will always be only one of these)
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
A ListPositionsObj-mapped object defining the order of list layout | entailment |
def update_list_positions_obj(client, positions_obj_id, revision, values):
'''
Updates the ordering of lists to have the given value. The given ID and revision should match the singleton object defining how lists are laid out.
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
The updated ListPositionsObj-mapped object defining the order of list layout
'''
return _update_positions_obj(client, client.api.Endpoints.LIST_POSITIONS, positions_obj_id, revision, values) | Updates the ordering of lists to have the given value. The given ID and revision should match the singleton object defining how lists are laid out.
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
The updated ListPositionsObj-mapped object defining the order of list layout | entailment |
def get_task_positions_objs(client, list_id):
'''
Gets a list containing the object that encapsulates information about the order tasks are laid out in within the given list. This list will always contain exactly one object.
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
A list containing a single TaskPositionsObj-mapped object
'''
params = {
'list_id' : int(list_id)
}
response = client.authenticated_request(client.api.Endpoints.TASK_POSITIONS, params=params)
return response.json() | Gets a list containing the object that encapsulates information about the order tasks are laid out in within the given list. This list will always contain exactly one object.
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
A list containing a single TaskPositionsObj-mapped object | entailment |
def get_task_subtask_positions_objs(client, task_id):
'''
Gets a list of the positions of a single task's subtasks
Each task should (will?) only have one positions object defining how its subtasks are laid out
'''
params = {
'task_id' : int(task_id)
}
response = client.authenticated_request(client.api.Endpoints.SUBTASK_POSITIONS, params=params)
return response.json() | Gets a list of the positions of a single task's subtasks
Each task should (will?) only have one positions object defining how its subtasks are laid out | entailment |
def get_list_subtask_positions_objs(client, list_id):
'''
Gets all subtask positions objects for the tasks within a given list. This is a convenience method so you don't have to get all the list's tasks before getting subtasks, though I can't fathom how mass subtask reordering is useful.
Returns:
List of SubtaskPositionsObj-mapped objects representing the order of subtasks for the tasks within the given list
'''
params = {
'list_id' : int(list_id)
}
response = client.authenticated_request(client.api.Endpoints.SUBTASK_POSITIONS, params=params)
return response.json() | Gets all subtask positions objects for the tasks within a given list. This is a convenience method so you don't have to get all the list's tasks before getting subtasks, though I can't fathom how mass subtask reordering is useful.
Returns:
List of SubtaskPositionsObj-mapped objects representing the order of subtasks for the tasks within the given list | entailment |
def _check_title_length(title, api):
''' Checks the given title against the given API specifications to ensure it's short enough '''
if len(title) > api.MAX_SUBTASK_TITLE_LENGTH:
raise ValueError("Title cannot be longer than {} characters".format(api.MAX_SUBTASK_TITLE_LENGTH)) | Checks the given title against the given API specifications to ensure it's short enough | entailment |
def get_task_subtasks(client, task_id, completed=False):
''' Gets subtasks for task with given ID '''
params = {
'task_id' : int(task_id),
'completed' : completed,
}
response = client.authenticated_request(client.api.Endpoints.SUBTASKS, params=params)
return response.json() | Gets subtasks for task with given ID | entailment |
def get_list_subtasks(client, list_id, completed=False):
''' Gets subtasks for the list with given ID '''
params = {
'list_id' : int(list_id),
'completed' : completed,
}
response = client.authenticated_request(client.api.Endpoints.SUBTASKS, params=params)
return response.json() | Gets subtasks for the list with given ID | entailment |
def get_subtask(client, subtask_id):
''' Gets the subtask with the given ID '''
endpoint = '/'.join([client.api.Endpoints.SUBTASKS, str(subtask_id)])
response = client.authenticated_request(endpoint)
return response.json() | Gets the subtask with the given ID | entailment |
def create_subtask(client, task_id, title, completed=False):
''' Creates a subtask with the given title under the task with the given ID '''
_check_title_length(title, client.api)
data = {
'task_id' : int(task_id) if task_id else None,
'title' : title,
'completed' : completed,
}
data = { key: value for key, value in data.items() if value is not None }
response = client.authenticated_request(client.api.Endpoints.SUBTASKS, 'POST', data=data)
return response.json() | Creates a subtask with the given title under the task with the given ID | entailment |
def update_subtask(client, subtask_id, revision, title=None, completed=None):
'''
Updates the subtask with the given ID
See https://developer.wunderlist.com/documentation/endpoints/subtask for detailed parameter information
'''
if title is not None:
_check_title_length(title, client.api)
data = {
'revision' : int(revision),
'title' : title,
'completed' : completed,
}
data = { key: value for key, value in data.items() if value is not None }
endpoint = '/'.join([client.api.Endpoints.SUBTASKS, str(subtask_id)])
response = client.authenticated_request(endpoint, 'PATCH', data=data)
return response.json() | Updates the subtask with the given ID
See https://developer.wunderlist.com/documentation/endpoints/subtask for detailed parameter information | entailment |
def delete_subtask(client, subtask_id, revision):
''' Deletes the subtask with the given ID provided the given revision equals the revision the server has '''
params = {
'revision' : int(revision),
}
endpoint = '/'.join([client.api.Endpoints.SUBTASKS, str(subtask_id)])
client.authenticated_request(endpoint, 'DELETE', params=params) | Deletes the subtask with the given ID provided the given revision equals the revision the server has | entailment |
def wait(animation='elipses', text='', speed=0.2):
"""
Decorator for adding wait animation to long running
functions.
Args:
animation (str, tuple): String reference to animation or tuple
with custom animation.
speed (float): Number of seconds each cycle of animation.
Examples:
>>> @animation.wait('bar')
>>> def long_running_function():
>>> ... 5 seconds later ...
>>> return
"""
def decorator(func):
func.animation = animation
func.speed = speed
func.text = text
@wraps(func)
def wrapper(*args, **kwargs):
animation = func.animation
text = func.text
if not isinstance(animation, (list, tuple)) and \
not hasattr(animations, animation):
text = animation if text == '' else text
animation = 'elipses'
wait = Wait(animation=animation, text=text, speed=func.speed)
wait.start()
try:
ret = func(*args, **kwargs)
finally:
wait.stop()
sys.stdout.write('\n')
return ret
return wrapper
return decorator | Decorator for adding wait animation to long running
functions.
Args:
animation (str, tuple): String reference to animation or tuple
with custom animation.
speed (float): Number of seconds each cycle of animation.
Examples:
>>> @animation.wait('bar')
>>> def long_running_function():
>>> ... 5 seconds later ...
>>> return | entailment |
def simple_wait(func):
"""
Decorator for adding simple text wait animation to
long running functions.
Examples:
>>> @animation.simple_wait
>>> def long_running_function():
>>> ... 5 seconds later ...
>>> return
"""
@wraps(func)
def wrapper(*args, **kwargs):
wait = Wait()
wait.start()
try:
ret = func(*args, **kwargs)
finally:
wait.stop()
sys.stdout.write('\n')
return ret
return wrapper | Decorator for adding simple text wait animation to
long running functions.
Examples:
>>> @animation.simple_wait
>>> def long_running_function():
>>> ... 5 seconds later ...
>>> return | entailment |
def start(self):
"""
Start animation thread.
"""
self.thread = threading.Thread(target=self._animate)
self.thread.start()
return | Start animation thread. | entailment |
def stop(self):
"""
Stop animation thread.
"""
time.sleep(self.speed)
self._count = -9999
sys.stdout.write(self.reverser + '\r\033[K\033[A')
sys.stdout.flush()
return | Stop animation thread. | entailment |
def merge(tup):
"""Merge several timeseries
Arguments:
tup: sequence of Timeseries, with the same shape except for axis 0
Returns:
Resulting merged timeseries which can have duplicate time points.
"""
if not all(tuple(ts.shape[1:] == tup[0].shape[1:] for ts in tup[1:])):
raise ValueError('Timeseries to merge must have compatible shapes')
indices = np.hstack(tuple(ts.tspan for ts in tup)).argsort()
return np.vstack((tup))[indices] | Merge several timeseries
Arguments:
tup: sequence of Timeseries, with the same shape except for axis 0
Returns:
Resulting merged timeseries which can have duplicate time points. | entailment |
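A minimal sketch of the time-ordering trick used above, with plain NumPy arrays standing in for Timeseries objects and invented numbers:
import numpy as np

tspan_a = np.array([0.0, 0.2, 0.4])
tspan_b = np.array([0.1, 0.3])
values_a = np.array([[1.0], [2.0], [3.0]])
values_b = np.array([[10.0], [20.0]])
indices = np.hstack((tspan_a, tspan_b)).argsort()
merged = np.vstack((values_a, values_b))[indices]
print(merged.ravel())  # [ 1. 10.  2. 20.  3.] -- rows reordered by time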
def add_analyses(cls, source):
"""Dynamically add new analysis methods to the Timeseries class.
Args:
source: Can be a function, module or the filename of a python file.
If a filename or a module is given, then all functions defined
inside not starting with _ will be added as methods.
The only restriction on the functions is that they can accept a
Timeseries as their first argument. So existing functions that
take a ndarray or array or even a list will usually also work.
"""
if isinstance(source, types.FunctionType):
_add_single_method(source.__name__, source)
else:
if isinstance(source, types.ModuleType):
mod = source
elif isinstance(source, types.StringTypes):
import os
import imp
path = os.path.abspath(source)
if os.path.isfile(path) and path[-3:] == '.py':
dir, file = os.path.split(path)
name = file[:-3]
module_info = imp.find_module(name, [dir])
mod = imp.load_module('nsim.' + name, *module_info)
elif (os.path.isdir(path) and
'__init__.py' in os.listdir(path)):
module_info = imp.find_module('__init__', [path])
name = os.path.basename(path)
mod = imp.load_module('nsim.' + name, *module_info)
else:
raise Error('"%s" is not a file or directory' % source)
else:
raise ValueError('`source` argument not a function or module')
for name, obj in mod.__dict__.items():
if name[0] != '_' and isinstance(obj, types.FunctionType):
cls._add_single_method(name, obj) | Dynamically add new analysis methods to the Timeseries class.
Args:
source: Can be a function, module or the filename of a python file.
If a filename or a module is given, then all functions defined
inside not starting with _ will be added as methods.
The only restriction on the functions is that they can accept a
Timeseries as their first argument. So existing functions that
take a ndarray or array or even a list will usually also work. | entailment |
def absolute(self):
"""Calculate the absolute value element-wise.
Returns:
absolute (Timeseries):
Absolute value. For complex input (a + b*j) gives sqrt(a**2 + b**2)
"""
return Timeseries(np.absolute(self), self.tspan, self.labels) | Calculate the absolute value element-wise.
Returns:
absolute (Timeseries):
Absolute value. For complex input (a + b*j) gives sqrt(a**2 + b**2) | entailment |
def angle(self, deg=False):
"""Return the angle of the complex argument.
Args:
deg (bool, optional):
Return angle in degrees if True, radians if False (default).
Returns:
angle (Timeseries):
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
"""
if self.dtype.str[1] != 'c':
warnings.warn('angle() is intended for complex-valued timeseries',
RuntimeWarning, 1)
return Timeseries(np.angle(self, deg=deg), self.tspan, self.labels) | Return the angle of the complex argument.
Args:
deg (bool, optional):
Return angle in degrees if True, radians if False (default).
Returns:
angle (Timeseries):
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64. | entailment |
def swapaxes(self, axis1, axis2):
"""Interchange two axes of a Timeseries."""
if self.ndim <=1 or axis1 == axis2:
return self
ar = np.asarray(self).swapaxes(axis1, axis2)
if axis1 != 0 and axis2 != 0:
# then axis 0 is unaffected by the swap
labels = self.labels[:]
labels[axis1], labels[axis2] = labels[axis2], labels[axis1]
return Timeseries(ar, self.tspan, labels)
return ar | Interchange two axes of a Timeseries. | entailment |
def transpose(self, *axes):
"""Permute the dimensions of a Timeseries."""
if self.ndim <= 1:
return self
ar = np.asarray(self).transpose(*axes)
if axes[0] != 0:
# then axis 0 is unaffected by the transposition
newlabels = [self.labels[ax] for ax in axes]
return Timeseries(ar, self.tspan, newlabels)
else:
return ar | Permute the dimensions of a Timeseries. | entailment |
def reshape(self, newshape, order='C'):
"""If axis 0 is unaffected by the reshape, then returns a Timeseries,
otherwise returns an ndarray. Preserves labels of axis j only if all
axes<=j are unaffected by the reshape.
See ``numpy.ndarray.reshape()`` for more information
"""
oldshape = self.shape
ar = np.asarray(self).reshape(newshape, order=order)
if (newshape is -1 and len(oldshape) is 1 or
(isinstance(newshape, numbers.Integral) and
newshape == oldshape[0]) or
(isinstance(newshape, Sequence) and
(newshape[0] == oldshape[0] or
(newshape[0] is -1 and np.array(oldshape[1:]).prod() ==
np.array(newshape[1:]).prod())))):
# then axis 0 is unaffected by the reshape
newlabels = [None] * ar.ndim
i = 1
while i < ar.ndim and i < self.ndim and ar.shape[i] == oldshape[i]:
newlabels[i] = self.labels[i]
i += 1
return Timeseries(ar, self.tspan, newlabels)
else:
return ar | If axis 0 is unaffected by the reshape, then returns a Timeseries,
otherwise returns an ndarray. Preserves labels of axis j only if all
axes<=j are unaffected by the reshape.
See ``numpy.ndarray.reshape()`` for more information | entailment |
def merge(self, ts):
"""Merge another timeseries with this one
Arguments:
ts (Timeseries): The two timeseries being merged must have the
same shape except for axis 0.
Returns:
Resulting merged timeseries which can have duplicate time points.
"""
if ts.shape[1:] != self.shape[1:]:
raise ValueError('Timeseries to merge must have compatible shapes')
indices = np.hstack((self.tspan, ts.tspan)).argsort()
return np.vstack((self, ts))[indices] | Merge another timeseries with this one
Arguments:
ts (Timeseries): The two timeseries being merged must have the
same shape except for axis 0.
Returns:
Resulting merged timeseries which can have duplicate time points. | entailment |
def expand_dims(self, axis):
"""Insert a new axis, at a given position in the array shape
Args:
axis (int): Position (amongst axes) where new axis is to be inserted.
"""
if axis == -1:
axis = self.ndim
array = np.expand_dims(self, axis)
if axis == 0:
# prepended an axis: no longer a Timeseries
return array
else:
new_labels = list(self.labels)
new_labels.insert(axis, None)
return Timeseries(array, self.tspan, new_labels) | Insert a new axis, at a given position in the array shape
Args:
axis (int): Position (amongst axes) where new axis is to be inserted. | entailment |
def concatenate(self, tup, axis=0):
"""Join a sequence of Timeseries to this one
Args:
tup (sequence of Timeseries): timeseries to be joined with this one.
They must have the same shape as this Timeseries, except in the
dimension corresponding to `axis`.
axis (int, optional): The axis along which timeseries will be joined.
Returns:
res (Timeseries or ndarray)
"""
if not isinstance(tup, Sequence):
tup = (tup,)
if tup == (None,) or len(tup) == 0:
return self
tup = (self,) + tuple(tup)
new_array = np.concatenate(tup, axis)
if not all(hasattr(ts, 'tspan') and
hasattr(ts, 'labels') for ts in tup):
return new_array
if axis == 0:
starts = [ts.tspan[0] for ts in tup]
ends = [ts.tspan[-1] for ts in tup]
if not all(starts[i] > ends[i-1] for i in range(1, len(starts))):
# series being joined are not ordered in time. not Timeseries
return new_array
else:
new_tspan = np.concatenate([ts.tspan for ts in tup])
else:
new_tspan = self.tspan
new_labels = [None]
for ax in range(1, new_array.ndim):
if ax == axis:
axislabels = []
for ts in tup:
if ts.labels[axis] is None:
axislabels.extend([''] * ts.shape[axis])
else:
axislabels.extend(ts.labels[axis])
if all(lab == '' for lab in axislabels):
new_labels.append(None)
else:
new_labels.append(axislabels)
else:
# non-concatenation axis
axlabels = tup[0].labels[ax]
if not all(ts.labels[ax] == axlabels for ts in tup[1:]):
# series to be joined do not agree on labels for this axis
axlabels = None
new_labels.append(axlabels)
return self.__new__(self.__class__, new_array, new_tspan, new_labels) | Join a sequence of Timeseries to this one
Args:
tup (sequence of Timeseries): timeseries to be joined with this one.
They must have the same shape as this Timeseries, except in the
dimension corresponding to `axis`.
axis (int, optional): The axis along which timeseries will be joined.
Returns:
res (Timeseries or ndarray) | entailment |
def split(self, indices_or_sections, axis=0):
"""Split a timeseries into multiple sub-timeseries"""
if not isinstance(indices_or_sections, numbers.Integral):
raise Error('splitting by array of indices is not yet implemented')
n = indices_or_sections
if self.shape[axis] % n != 0:
raise ValueError("Array split doesn't result in an equal division")
step = self.shape[axis] // n
pieces = []
start = 0
while start < self.shape[axis]:
stop = start + step
ix = [slice(None)] * self.ndim
ix[axis] = slice(start, stop)
ix = tuple(ix)
pieces.append(self[ix])
start += step
return pieces | Split a timeseries into multiple sub-timeseries | entailment |
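Illustrative arithmetic only (the numbers are made up): splitting 100 samples into 4 equal sections along axis 0 produces the slice ranges built by the loop above.
n_samples, sections = 100, 4
step = n_samples // sections
pieces = [(start, start + step) for start in range(0, n_samples, step)]
print(pieces)  # [(0, 25), (25, 50), (50, 75), (75, 100)]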
def plot(dts, title=None, points=None, show=True):
"""Plot a distributed timeseries
Args:
dts (DistTimeseries)
title (str, optional)
points (int, optional): Limit the number of time points plotted.
If specified, will downsample to use this total number of time points,
and only fetch back the necessary points to the client for plotting.
Returns:
fig
"""
if points is not None and len(dts.tspan) > points:
# then downsample (TODO: use interpolation)
ix = np.linspace(0, len(dts.tspan) - 1, points).astype(np.int64)
dts = dts[ix, ...]
ts = distob.gather(dts)
return ts.plot(title, show) | Plot a distributed timeseries
Args:
dts (DistTimeseries)
title (str, optional)
points (int, optional): Limit the number of time points plotted.
If specified, will downsample to use this total number of time points,
and only fetch back the necessary points to the client for plotting.
Returns:
fig | entailment |
def phase_histogram(dts, times=None, nbins=30, colormap=mpl.cm.Blues):
"""Plot a polar histogram of a phase variable's probability distribution
Args:
dts: DistTimeseries with axis 2 ranging over separate instances of an
oscillator (time series values are assumed to represent an angle)
times (float or sequence of floats): The target times at which
to plot the distribution
nbins (int): number of histogram bins
colormap
"""
if times is None:
times = np.linspace(dts.tspan[0], dts.tspan[-1], num=4)
elif isinstance(times, numbers.Number):
times = np.array([times], dtype=np.float64)
indices = distob.gather(dts.tspan.searchsorted(times))
if indices[-1] == len(dts.tspan):
indices[-1] -= 1
nplots = len(indices)
fig = plt.figure()
n = np.zeros((nbins, nplots))
for i in range(nplots):
index = indices[i]
time = dts.tspan[index]
phases = distob.gather(dts.mod2pi()[index, 0, :])
ax = fig.add_subplot(1, nplots, i + 1, projection='polar')
n[:,i], bins, patches = ax.hist(phases, nbins, (-np.pi, np.pi),
density=True, histtype='bar')
ax.set_title('time = %d s' % time)
ax.set_xticklabels(['0', r'$\frac{\pi}{4}$', r'$\frac{\pi}{2}$',
r'$\frac{3\pi}{4}$', r'$\pi$', r'$\frac{-3\pi}{4}$',
r'$\frac{-\pi}{2}$', r'$\frac{-\pi}{4}$'])
nmin, nmax = n.min(), n.max()
#TODO should make a custom colormap instead of reducing color dynamic range:
norm = mpl.colors.Normalize(1.2*nmin - 0.2*nmax,
0.6*nmin + 0.4*nmax, clip=True)
for i in range(nplots):
ax = fig.get_axes()[i]
ax.set_ylim(0, nmax)
for this_n, thispatch in zip(n[:,i], ax.patches):
color = colormap(norm(this_n))
thispatch.set_facecolor(color)
thispatch.set_edgecolor(color)
fig.show() | Plot a polar histogram of a phase variable's probability distribution
Args:
dts: DistTimeseries with axis 2 ranging over separate instances of an
oscillator (time series values are assumed to represent an angle)
times (float or sequence of floats): The target times at which
to plot the distribution
nbins (int): number of histogram bins
colormap | entailment |
def psd(ts, nperseg=1500, noverlap=1200, plot=True):
"""plot Welch estimate of power spectral density, using nperseg samples per
segment, with noverlap samples overlap and Hamming window."""
ts = ts.squeeze()
if ts.ndim is 1:
ts = ts.reshape((-1, 1))
fs = (len(ts) - 1.0) / (ts.tspan[-1] - ts.tspan[0])
window = signal.windows.hamming(nperseg, sym=False)
nfft = max(256, 2**int(np.log2(nperseg) + 1))
freqs, pxx = signal.welch(ts, fs, window, nperseg, noverlap, nfft,
detrend='linear', axis=0)
# Discard estimates for freq bins that are too low for the window size.
# (require two full cycles to fit within the window)
index = np.nonzero(freqs >= 2.0*fs/nperseg)[0][0]
if index > 0:
freqs = freqs[index:]
pxx = pxx[index:]
# Discard estimate for last freq bin as too high for Nyquist frequency:
freqs = freqs[:-1]
pxx = pxx[:-1]
if plot is True:
_plot_psd(ts, freqs, pxx)
return freqs, pxx | plot Welch estimate of power spectral density, using nperseg samples per
segment, with noverlap samples overlap and Hamming window. | entailment |
def lowpass(ts, cutoff_hz, order=3):
"""forward-backward butterworth low-pass filter"""
orig_ndim = ts.ndim
if ts.ndim is 1:
ts = ts[:, np.newaxis]
channels = ts.shape[1]
fs = (len(ts) - 1.0) / (ts.tspan[-1] - ts.tspan[0])
nyq = 0.5 * fs
cutoff = cutoff_hz/nyq
b, a = signal.butter(order, cutoff, btype='low')
if not np.all(np.abs(np.roots(a)) < 1.0):
raise ValueError('Filter will not be stable with these values.')
dtype = ts.dtype
output = np.zeros((len(ts), channels), dtype)
for i in range(channels):
output[:, i] = signal.filtfilt(b, a, ts[:, i])
if orig_ndim is 1:
output = output[:, 0]
return Timeseries(output, ts.tspan, labels=ts.labels) | forward-backward butterworth low-pass filter | entailment |
def bandpass(ts, low_hz, high_hz, order=3):
"""forward-backward butterworth band-pass filter"""
orig_ndim = ts.ndim
if ts.ndim is 1:
ts = ts[:, np.newaxis]
channels = ts.shape[1]
fs = (len(ts) - 1.0) / (ts.tspan[-1] - ts.tspan[0])
nyq = 0.5 * fs
low = low_hz/nyq
high = high_hz/nyq
b, a = signal.butter(order, [low, high], btype='band')
if not np.all(np.abs(np.roots(a)) < 1.0):
raise ValueError('Filter will not be stable with these values.')
dtype = ts.dtype
output = np.zeros((len(ts), channels), dtype)
for i in range(channels):
output[:, i] = signal.filtfilt(b, a, ts[:, i])
if orig_ndim is 1:
output = output[:, 0]
return Timeseries(output, ts.tspan, labels=ts.labels) | forward-backward butterworth band-pass filter | entailment |
def notch(ts, freq_hz, bandwidth_hz=1.0):
"""notch filter to remove remove a particular frequency
Adapted from code by Sturla Molden
"""
orig_ndim = ts.ndim
if ts.ndim is 1:
ts = ts[:, np.newaxis]
channels = ts.shape[1]
fs = (len(ts) - 1.0) / (ts.tspan[-1] - ts.tspan[0])
nyq = 0.5 * fs
freq = freq_hz/nyq
bandwidth = bandwidth_hz/nyq
R = 1.0 - 3.0*(bandwidth/2.0)
K = ((1.0 - 2.0*R*np.cos(np.pi*freq) + R**2) /
(2.0 - 2.0*np.cos(np.pi*freq)))
b, a = np.zeros(3), np.zeros(3)
a[0] = 1.0
a[1] = -2.0*R*np.cos(np.pi*freq)
a[2] = R**2
b[0] = K
b[1] = -2*K*np.cos(np.pi*freq)
b[2] = K
if not np.all(np.abs(np.roots(a)) < 1.0):
raise ValueError('Filter will not be stable with these values.')
dtype = ts.dtype
output = np.zeros((len(ts), channels), dtype)
for i in range(channels):
output[:, i] = signal.filtfilt(b, a, ts[:, i])
if orig_ndim is 1:
output = output[:, 0]
return Timeseries(output, ts.tspan, labels=ts.labels) | notch filter to remove a particular frequency
Adapted from code by Sturla Molden | entailment |
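As a sanity check on those coefficients, here is a hedged sketch (sampling rate, notch frequency and bandwidth are invented) that evaluates the filter's gain at the notch frequency with scipy.signal.freqz; it should come out essentially zero.
import numpy as np
from scipy import signal

fs, freq_hz, bandwidth_hz = 250.0, 50.0, 1.0
nyq = 0.5 * fs
freq, bandwidth = freq_hz / nyq, bandwidth_hz / nyq
R = 1.0 - 3.0 * (bandwidth / 2.0)
K = (1.0 - 2.0 * R * np.cos(np.pi * freq) + R**2) / (2.0 - 2.0 * np.cos(np.pi * freq))
b = np.array([K, -2.0 * K * np.cos(np.pi * freq), K])
a = np.array([1.0, -2.0 * R * np.cos(np.pi * freq), R**2])
w, h = signal.freqz(b, a, worN=[2.0 * np.pi * freq_hz / fs])
print(abs(h[0]))  # ~0: the 50 Hz component is removed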
def hilbert(ts):
"""Analytic signal, using the Hilbert transform"""
output = signal.hilbert(signal.detrend(ts, axis=0), axis=0)
return Timeseries(output, ts.tspan, labels=ts.labels) | Analytic signal, using the Hilbert transform | entailment |
def hilbert_amplitude(ts):
"""Amplitude of the analytic signal, using the Hilbert transform"""
output = np.abs(signal.hilbert(signal.detrend(ts, axis=0), axis=0))
return Timeseries(output, ts.tspan, labels=ts.labels) | Amplitude of the analytic signal, using the Hilbert transform | entailment |
def hilbert_phase(ts):
"""Phase of the analytic signal, using the Hilbert transform"""
output = np.angle(signal.hilbert(signal.detrend(ts, axis=0), axis=0))
return Timeseries(output, ts.tspan, labels=ts.labels) | Phase of the analytic signal, using the Hilbert transform | entailment |
def cwt(ts, freqs=np.logspace(0, 2), wavelet=cwtmorlet, plot=True):
"""Continuous wavelet transform
Note the full results can use a huge amount of memory at 64-bit precision
Args:
ts: Timeseries of m variables, shape (n, m). Assumed constant timestep.
freqs: list of frequencies (in Hz) to use for the transform.
(default is 50 frequency bins logarithmic from 1Hz to 100Hz)
wavelet: the wavelet to use. may be complex. see scipy.signal.wavelets
plot: whether to plot time-resolved power spectrum
Returns:
coefs: Continuous wavelet transform output array, shape (n,len(freqs),m)
"""
orig_ndim = ts.ndim
if ts.ndim is 1:
ts = ts[:, np.newaxis]
channels = ts.shape[1]
fs = (len(ts) - 1.0) / (1.0*ts.tspan[-1] - ts.tspan[0])
x = signal.detrend(ts, axis=0)
dtype = wavelet(fs/freqs[0], fs/freqs[0]).dtype
coefs = np.zeros((len(ts), len(freqs), channels), dtype)
for i in range(channels):
coefs[:, :, i] = roughcwt(x[:, i], wavelet, fs/freqs).T
if plot:
_plot_cwt(ts, coefs, freqs)
if orig_ndim is 1:
coefs = coefs[:, :, 0]
return coefs | Continuous wavelet transform
Note the full results can use a huge amount of memory at 64-bit precision
Args:
ts: Timeseries of m variables, shape (n, m). Assumed constant timestep.
freqs: list of frequencies (in Hz) to use for the transform.
(default is 50 frequency bins logarithmic from 1Hz to 100Hz)
wavelet: the wavelet to use. may be complex. see scipy.signal.wavelets
plot: whether to plot time-resolved power spectrum
Returns:
coefs: Continuous wavelet transform output array, shape (n,len(freqs),m) | entailment |
def cwt_distributed(ts, freqs=np.logspace(0, 2), wavelet=cwtmorlet, plot=True):
"""Continuous wavelet transform using distributed computation.
(Currently just splits the data by channel. TODO split it further.)
Note: this function requires an IPython cluster to be started first.
Args:
ts: Timeseries of m variables, shape (n, m). Assumed constant timestep.
freqs: list of frequencies (in Hz) to use for the transform.
(default is 50 frequency bins logarithmic from 1Hz to 100Hz)
wavelet: the wavelet to use. may be complex. see scipy.signal.wavelets
plot: whether to plot time-resolved power spectrum
Returns:
coefs: Continuous wavelet transform output array, shape (n,len(freqs),m)
"""
if ts.ndim is 1 or ts.shape[1] is 1:
return cwt(ts, freqs, wavelet, plot)
import distob
vcwt = distob.vectorize(cwt)
coefs = vcwt(ts, freqs, wavelet, plot=False)
if plot:
_plot_cwt(ts, coefs, freqs)
return coefs | Continuous wavelet transform using distributed computation.
(Currently just splits the data by channel. TODO split it further.)
Note: this function requires an IPython cluster to be started first.
Args:
ts: Timeseries of m variables, shape (n, m). Assumed constant timestep.
freqs: list of frequencies (in Hz) to use for the tranform.
(default is 50 frequency bins logarithmic from 1Hz to 100Hz)
wavelet: the wavelet to use. may be complex. see scipy.signal.wavelets
plot: whether to plot time-resolved power spectrum
Returns:
coefs: Continuous wavelet transform output array, shape (n,len(freqs),m) | entailment |
def _plot_cwt(ts, coefs, freqs, tsize=1024, fsize=512):
"""Plot time resolved power spectral density from cwt results
Args:
ts: the original Timeseries
coefs: continuous wavelet transform coefficients as calculated by cwt()
freqs: list of frequencies (in Hz) corresponding to coefs.
tsize, fsize: size of the plot (time axis and frequency axis, in pixels)
"""
import matplotlib.style
import matplotlib as mpl
mpl.style.use('classic')
import matplotlib.pyplot as plt
from scipy import interpolate
channels = ts.shape[1]
fig = plt.figure()
for i in range(channels):
rect = (0.1, 0.85*(channels - i - 1)/channels + 0.1,
0.8, 0.85/channels)
ax = fig.add_axes(rect)
logpowers = np.log((coefs[:, :, i] * coefs[:, :, i].conj()).real)
tmin, tmax = ts.tspan[0], ts.tspan[-1]
fmin, fmax = freqs[0], freqs[-1]
tgrid, fgrid = np.mgrid[tmin:tmax:tsize*1j, fmin:fmax:fsize*1j]
gd = interpolate.interpn((ts.tspan, freqs), logpowers,
(tgrid, fgrid)).T
ax.imshow(gd, cmap='gnuplot2', aspect='auto', origin='lower',
extent=(tmin, tmax, fmin, fmax))
ax.set_ylabel('freq (Hz)')
fig.axes[0].set_title(u'log(power spectral density)')
fig.axes[channels - 1].set_xlabel('time (s)')
fig.show() | Plot time resolved power spectral density from cwt results
Args:
ts: the original Timeseries
coefs: continuous wavelet transform coefficients as calculated by cwt()
freqs: list of frequencies (in Hz) corresponding to coefs.
tsize, fsize: size of the plot (time axis and frequency axis, in pixels) | entailment |
def first_return_times(dts, c=None, d=0.0):
"""For an ensemble of time series, return the set of all time intervals
between successive returns to value c for all instances in the ensemble.
If c is not given, the default is the mean across all times and across all
time series in the ensemble.
Args:
dts (DistTimeseries)
c (float): Optional target value (default is the ensemble mean value)
d (float): Optional min distance from c to be attained between returns
Returns:
array of time intervals (Can take the mean of these to estimate the
expected first return time for the whole ensemble)
"""
if c is None:
c = dts.mean()
vmrt = distob.vectorize(analyses1.first_return_times)
all_intervals = vmrt(dts, c, d)
if hasattr(type(all_intervals), '__array_interface__'):
return np.ravel(all_intervals)
else:
return np.hstack([distob.gather(ilist) for ilist in all_intervals]) | For an ensemble of time series, return the set of all time intervals
between successive returns to value c for all instances in the ensemble.
If c is not given, the default is the mean across all times and across all
time series in the ensemble.
Args:
dts (DistTimeseries)
c (float): Optional target value (default is the ensemble mean value)
d (float): Optional min distance from c to be attained between returns
Returns:
array of time intervals (Can take the mean of these to estimate the
expected first return time for the whole ensemble) | entailment |
def variability_fp(ts, freqs=None, ncycles=6, plot=True):
"""Example variability function.
Gives two continuous, time-resolved measures of the variability of a
time series, ranging between -1 and 1.
The two measures are based on variance of the centroid frequency and
variance of the height of the spectral peak, respectively.
(Centroid frequency meaning the power-weighted average frequency)
These measures are calculated over sliding time windows of variable size.
See also: Blenkinsop et al. (2012) The dynamic evolution of focal-onset
epilepsies - combining theoretical and clinical observations
Args:
ts Timeseries of m variables, shape (n, m). Assumed constant timestep.
freqs (optional) List of frequencies to examine. If None, defaults to
50 frequency bands ranging 1Hz to 60Hz, logarithmically spaced.
ncycles Window size, in number of cycles of the centroid frequency.
plot bool Whether to display the output
Returns:
variability Timeseries of shape (n, m, 2)
variability[:, :, 0] gives a measure of variability
between -1 and 1 based on variance of centroid frequency.
variability[:, :, 1] gives a measure of variability
between -1 and 1 based on variance of maximum power.
"""
if ts.ndim <= 2:
return analyses1.variability_fp(ts, freqs, ncycles, plot)
else:
return distob.vectorize(analyses1.variability_fp)(
ts, freqs, ncycles, plot) | Example variability function.
Gives two continuous, time-resolved measures of the variability of a
time series, ranging between -1 and 1.
The two measures are based on variance of the centroid frequency and
variance of the height of the spectral peak, respectively.
(Centroid frequency meaning the power-weighted average frequency)
These measures are calculated over sliding time windows of variable size.
See also: Blenkinsop et al. (2012) The dynamic evolution of focal-onset
epilepsies - combining theoretical and clinical observations
Args:
ts Timeseries of m variables, shape (n, m). Assumed constant timestep.
freqs (optional) List of frequencies to examine. If None, defaults to
50 frequency bands ranging 1Hz to 60Hz, logarithmically spaced.
ncycles Window size, in number of cycles of the centroid frequency.
plot bool Whether to display the output
Returns:
variability Timeseries of shape (n, m, 2)
variability[:, :, 0] gives a measure of variability
between -1 and 1 based on variance of centroid frequency.
variability[:, :, 1] gives a measure of variability
between -1 and 1 based on variance of maximum power. | entailment |
def epochs(ts, variability=None, threshold=0.0, minlength=1.0, plot=True):
"""Identify "stationary" epochs within a time series, based on a
continuous measure of variability.
Epochs are defined to contain the points of minimal variability, and to
extend as wide as possible with variability not exceeding the threshold.
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m, q), giving q scalar
measures of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
Epochs require the mean of these to be below the threshold.
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
plot bool Whether to display the output
Returns: (variability, allchannels_epochs)
variability: as above
allchannels_epochs: (list of) list of tuples
For each variable, a list of tuples (start, end) that give the
starting and ending indices of stationary epochs.
(epochs are inclusive of start point but not the end point)
"""
if ts.ndim <= 2:
return analyses1.epochs_distributed(
ts, variability, threshold, minlength, plot)
else:
return distob.vectorize(analyses1.epochs)(
ts, variability, threshold, minlength, plot) | Identify "stationary" epochs within a time series, based on a
continuous measure of variability.
Epochs are defined to contain the points of minimal variability, and to
extend as wide as possible with variability not exceeding the threshold.
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m, q), giving q scalar
measures of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
Epochs require the mean of these to be below the threshold.
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
plot bool Whether to display the output
Returns: (variability, allchannels_epochs)
variability: as above
allchannels_epochs: (list of) list of tuples
For each variable, a list of tuples (start, end) that give the
starting and ending indices of stationary epochs.
(epochs are inclusive of start point but not the end point) | entailment |
def epochs_joint(ts, variability=None, threshold=0.0, minlength=1.0,
proportion=0.75, plot=True):
"""Identify epochs within a multivariate time series where at least a
certain proportion of channels are "stationary", based on a previously
computed variability measure.
(Note: This requires an IPython cluster to be started first,
e.g. on a workstation type 'ipcluster start')
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m), giving a scalar
measure of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
proportion Require at least this fraction of channels to be "stationary"
plot bool Whether to display the output
Returns: (variability, joint_epochs)
joint_epochs: list of tuples
A list of tuples (start, end) that give the starting and ending indices
of time epochs that are stationary for at least `proportion` of channels.
(epochs are inclusive of start point but not the end point)
"""
if ts.ndim <= 2:
return analyses1.epochs_joint(
ts, variability, threshold, minlength, plot)
else:
return distob.vectorize(analyses1.epochs_joint)(
ts, variability, threshold, minlength, plot) | Identify epochs within a multivariate time series where at least a
certain proportion of channels are "stationary", based on a previously
computed variability measure.
(Note: This requires an IPython cluster to be started first,
e.g. on a workstation type 'ipcluster start')
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m), giving a scalar
measure of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
proportion Require at least this fraction of channels to be "stationary"
plot bool Whether to display the output
Returns: (variability, joint_epochs)
joint_epochs: list of tuples
A list of tuples (start, end) that give the starting and ending indices
of time epochs that are stationary for at least `proportion` of channels.
(epochs are inclusive of start point but not the end point) | entailment |
def periods(dts, phi=0.0):
"""For an ensemble of oscillators, return the set of periods lengths of
all successive oscillations of all oscillators.
An individual oscillation is defined to start and end when the phase
passes phi (by default zero) after completing a full cycle.
If the timeseries of an oscillator phase begins (or ends) exactly at phi,
then the first (or last) oscillation will be included.
Arguments:
dts (DistTimeseries): where dts.shape[1] is 1 (single output variable
representing phase) and axis 2 ranges over multiple realizations of
the oscillator.
phi=0.0: float
A single oscillation starts and ends at phase phi (by default zero).
"""
vperiods = distob.vectorize(analyses1.periods)
all_periods = vperiods(dts, phi)
if hasattr(type(all_periods), '__array_interface__'):
return np.ravel(all_periods)
else:
        return np.hstack([distob.gather(plist) for plist in all_periods]) | For an ensemble of oscillators, return the set of period lengths of
all successive oscillations of all oscillators.
An individual oscillation is defined to start and end when the phase
passes phi (by default zero) after completing a full cycle.
If the timeseries of an oscillator phase begins (or ends) exactly at phi,
then the first (or last) oscillation will be included.
Arguments:
dts (DistTimeseries): where dts.shape[1] is 1 (single output variable
representing phase) and axis 2 ranges over multiple realizations of
the oscillator.
phi=0.0: float
A single oscillation starts and ends at phase phi (by default zero). | entailment |
def circmean(dts, axis=2):
"""Circular mean phase"""
return np.exp(1.0j * dts).mean(axis=axis).angle() | Circular mean phase | entailment |
def order_param(dts, axis=2):
"""Order parameter of phase synchronization"""
return np.abs(np.exp(1.0j * dts).mean(axis=axis)) | Order parameter of phase synchronization | entailment |
def circstd(dts, axis=2):
"""Circular standard deviation"""
R = np.abs(np.exp(1.0j * dts).mean(axis=axis))
return np.sqrt(-2.0 * np.log(R)) | Circular standard deviation | entailment |
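# Illustrative usage sketch (not part of the original source): applied to a plain
# numpy array of phase angles, the three circular statistics above reduce to the
# operations below. Note that circmean() relies on an .angle() method of the
# Timeseries class; for an ordinary ndarray, numpy.angle() is the equivalent.
import numpy as np
phases = np.random.uniform(-np.pi, np.pi, size=(1000, 1, 50))  # hypothetical ensemble
z = np.exp(1.0j * phases).mean(axis=2)   # mean resultant vector at each time point
mean_phase = np.angle(z)                 # circular mean phase
R = np.abs(z)                            # order parameter, between 0 and 1
circ_sd = np.sqrt(-2.0 * np.log(R))      # circular standard deviation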
def f(self, v, t):
"""Aburn2012 equations right hand side, noise free term
Args:
v: (8,) array
state vector
t: number
scalar time
Returns:
(8,) array
"""
ret = np.zeros(8)
ret[0] = v[4]
ret[4] = (self.He1*self.ke1*(self.g1*self.S(v[1]-v[2]) + self.u_mean) -
2*self.ke1*v[4] - self.ke1*self.ke1*v[0])
ret[1] = v[5]
ret[5] = (self.He2*self.ke2*(self.g2*self.S(v[0]) + self.p_mean) -
2*self.ke2*v[5] - self.ke2*self.ke2*v[1])
ret[2] = v[6]
ret[6] = (self.Hi*self.ki*self.g4*self.S(v[3]) - 2*self.ki*v[6] -
self.ki*self.ki*v[2])
ret[3] = v[7]
ret[7] = (self.He3*self.ke3*self.g3*self.S(v[1]-v[2]) -
2*self.ke3*v[7] - self.ke3*self.ke3*v[3])
return ret | Aburn2012 equations right hand side, noise free term
Args:
v: (8,) array
state vector
t: number
scalar time
Returns:
(8,) array | entailment |
def G(self, v, t):
"""Aburn2012 equations right hand side, noise term
Args:
v: (8,) array
state vector
t: number
scalar time
Returns:
(8,1) array
Only one matrix column, meaning that in this example we are modelling
the noise input to pyramidal and spiny populations as fully
correlated. To simulate uncorrelated inputs instead, use an array of
shape (8, 2) with the second noise element [5,1] instead of [5,0].
"""
ret = np.zeros((8, 1))
ret[4,0] = self.ke1 * self.He1 * self.u_sdev
ret[5,0] = self.ke2 * self.He2 * self.p_sdev
return ret | Aburn2012 equations right hand side, noise term
Args:
v: (8,) array
state vector
t: number
scalar time
Returns:
(8,1) array
Only one matrix column, meaning that in this example we are modelling
the noise input to pyramidal and spiny populations as fully
correlated. To simulate uncorrelated inputs instead, use an array of
shape (8, 2) with the second noise element [5,1] instead of [5,0]. | entailment |
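# Illustrative sketch (assumptions: `model` is an instance of the class above,
# exposing the f() and G() methods; the step size and duration are arbitrary).
# It shows how the drift term f and the noise coefficient matrix G combine in a
# basic Euler-Maruyama step; nsim provides its own integrators, so this is only
# a minimal sketch, not the library's simulation method.
import numpy as np

def euler_maruyama(model, v0, dt, nsteps):
    """Integrate dv = f(v,t)*dt + G(v,t)*dW with a fixed time step."""
    v = np.array(v0, dtype=float)
    out = np.empty((nsteps + 1, v.size))
    out[0] = v
    for i in range(nsteps):
        t = i * dt
        ncols = model.G(v, t).shape[1]   # number of independent noise processes
        dW = np.random.normal(0.0, np.sqrt(dt), size=ncols)
        v = v + model.f(v, t) * dt + model.G(v, t).dot(dW)
        out[i + 1] = v
    return out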
def coupling(self, source_y, target_y, weight):
"""How to couple the output of one node to the input of another.
Args:
source_y (array of shape (8,)): state of the source node
target_y (array of shape (8,)): state of the target node
weight (float): the connection strength
Returns:
input (array of shape (8,)): value to drive each variable of the
target node.
"""
v_pyramidal = source_y[1] - source_y[2]
return (np.array([0, 0, 0, 0, 0, 1.0, 0, 0]) *
(weight*self.g1*self.He2*self.ke2*self.S(v_pyramidal))) | How to couple the output of one node to the input of another.
Args:
source_y (array of shape (8,)): state of the source node
target_y (array of shape (8,)): state of the target node
weight (float): the connection strength
Returns:
input (array of shape (8,)): value to drive each variable of the
target node. | entailment |
def hurst(X):
""" Compute the Hurst exponent of X. If the output H=0.5,the behavior
of the time-series is similar to random walk. If H<0.5, the time-series
cover less "distance" than a random walk, vice verse.
Parameters
----------
X
list
a time series
Returns
-------
H
float
Hurst exponent
Notes
--------
Author of this function is Xin Liu
Examples
--------
>>> import pyeeg
>>> from numpy.random import randn
>>> a = randn(4096)
>>> pyeeg.hurst(a)
0.5057444
"""
X = numpy.array(X)
N = X.size
T = numpy.arange(1, N + 1)
Y = numpy.cumsum(X)
Ave_T = Y / T
S_T = numpy.zeros(N)
R_T = numpy.zeros(N)
for i in range(N):
S_T[i] = numpy.std(X[:i + 1])
X_T = Y - T * Ave_T[i]
R_T[i] = numpy.ptp(X_T[:i + 1])
R_S = R_T / S_T
R_S = numpy.log(R_S)[1:]
n = numpy.log(T)[1:]
A = numpy.column_stack((n, numpy.ones(n.size)))
[m, c] = numpy.linalg.lstsq(A, R_S)[0]
H = m
    return H | Compute the Hurst exponent of X. If the output H=0.5, the behavior
of the time series is similar to a random walk. If H<0.5, the time series
covers less "distance" than a random walk, and vice versa.
Parameters
----------
X
list
a time series
Returns
-------
H
float
Hurst exponent
Notes
--------
Author of this function is Xin Liu
Examples
--------
>>> import pyeeg
>>> from numpy.random import randn
>>> a = randn(4096)
>>> pyeeg.hurst(a)
0.5057444 | entailment |
def embed_seq(X, Tau, D):
"""Build a set of embedding sequences from given time series X with lag Tau
and embedding dimension DE. Let X = [x(1), x(2), ... , x(N)], then for each
i such that 1 < i < N - (D - 1) * Tau, we build an embedding sequence,
Y(i) = [x(i), x(i + Tau), ... , x(i + (D - 1) * Tau)]. All embedding
sequence are placed in a matrix Y.
Parameters
----------
X
list
a time series
Tau
integer
the lag or delay when building embedding sequence
D
integer
the embedding dimension
Returns
-------
Y
2-D list
embedding matrix built
Examples
---------------
>>> import pyeeg
>>> a=range(0,9)
>>> pyeeg.embed_seq(a,1,4)
array([[ 0., 1., 2., 3.],
[ 1., 2., 3., 4.],
[ 2., 3., 4., 5.],
[ 3., 4., 5., 6.],
[ 4., 5., 6., 7.],
[ 5., 6., 7., 8.]])
>>> pyeeg.embed_seq(a,2,3)
array([[ 0., 2., 4.],
[ 1., 3., 5.],
[ 2., 4., 6.],
[ 3., 5., 7.],
[ 4., 6., 8.]])
>>> pyeeg.embed_seq(a,4,1)
array([[ 0.],
[ 1.],
[ 2.],
[ 3.],
[ 4.],
[ 5.],
[ 6.],
[ 7.],
[ 8.]])
"""
shape = (X.size - Tau * (D - 1), D)
strides = (X.itemsize, Tau * X.itemsize)
return numpy.lib.stride_tricks.as_strided(X, shape=shape, strides=strides) | Build a set of embedding sequences from given time series X with lag Tau
and embedding dimension DE. Let X = [x(1), x(2), ... , x(N)], then for each
i such that 1 < i < N - (D - 1) * Tau, we build an embedding sequence,
Y(i) = [x(i), x(i + Tau), ... , x(i + (D - 1) * Tau)]. All embedding
sequence are placed in a matrix Y.
Parameters
----------
X
list
a time series
Tau
integer
the lag or delay when building embedding sequence
D
integer
the embedding dimension
Returns
-------
Y
2-D list
embedding matrix built
Examples
---------------
>>> import pyeeg
>>> a=range(0,9)
>>> pyeeg.embed_seq(a,1,4)
array([[ 0., 1., 2., 3.],
[ 1., 2., 3., 4.],
[ 2., 3., 4., 5.],
[ 3., 4., 5., 6.],
[ 4., 5., 6., 7.],
[ 5., 6., 7., 8.]])
>>> pyeeg.embed_seq(a,2,3)
array([[ 0., 2., 4.],
[ 1., 3., 5.],
[ 2., 4., 6.],
[ 3., 5., 7.],
[ 4., 6., 8.]])
>>> pyeeg.embed_seq(a,4,1)
array([[ 0.],
[ 1.],
[ 2.],
[ 3.],
[ 4.],
[ 5.],
[ 6.],
[ 7.],
[ 8.]]) | entailment |
def bin_power(X, Band, Fs):
"""Compute power in each frequency bin specified by Band from FFT result of
X. By default, X is a real signal.
Note
-----
A real signal can be synthesized, thus not real.
Parameters
-----------
Band
list
boundary frequencies (in Hz) of bins. They can be unequal bins, e.g.
[0.5,4,7,12,30] which are delta, theta, alpha and beta respectively.
You can also use range() function of Python to generate equal bins and
pass the generated list to this function.
Each element of Band is a physical frequency and shall not exceed the
Nyquist frequency, i.e., half of sampling frequency.
X
list
a 1-D real time series.
Fs
integer
the sampling rate in physical frequency
Returns
-------
Power
list
spectral power in each frequency bin.
Power_ratio
list
spectral power in each frequency bin normalized by total power in ALL
frequency bins.
"""
C = numpy.fft.fft(X)
C = abs(C)
Power = numpy.zeros(len(Band) - 1)
for Freq_Index in range(0, len(Band) - 1):
Freq = float(Band[Freq_Index])
Next_Freq = float(Band[Freq_Index + 1])
        Power[Freq_Index] = sum(
            C[int(numpy.floor(
                Freq / Fs * len(X)
            )): int(numpy.floor(Next_Freq / Fs * len(X)))]
        )
Power_Ratio = Power / sum(Power)
return Power, Power_Ratio | Compute power in each frequency bin specified by Band from FFT result of
X. By default, X is a real signal.
Note
-----
A real signal can be synthesized, thus not real.
Parameters
-----------
Band
list
boundary frequencies (in Hz) of bins. They can be unequal bins, e.g.
[0.5,4,7,12,30] which are delta, theta, alpha and beta respectively.
You can also use range() function of Python to generate equal bins and
pass the generated list to this function.
Each element of Band is a physical frequency and shall not exceed the
Nyquist frequency, i.e., half of sampling frequency.
X
list
a 1-D real time series.
Fs
integer
the sampling rate in physical frequency
Returns
-------
Power
list
spectral power in each frequency bin.
Power_ratio
list
spectral power in each frequency bin normalized by total power in ALL
frequency bins. | entailment |
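# Illustrative usage sketch (not from the original source): band powers of a test
# signal containing a 10 Hz component, using the conventional EEG band edges quoted
# in the docstring above. The sampling rate and duration are arbitrary choices.
import numpy
Fs = 256                                  # sampling rate in Hz
t = numpy.arange(0, 4, 1.0 / Fs)          # 4 seconds of data
X = numpy.sin(2 * numpy.pi * 10 * t) + 0.5 * numpy.random.randn(t.size)
Power, Power_Ratio = bin_power(X, [0.5, 4, 7, 12, 30], Fs)
# Power_Ratio[2] (the 7-12 Hz alpha bin) should dominate for this signal.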
def pfd(X, D=None):
"""Compute Petrosian Fractal Dimension of a time series from either two
cases below:
1. X, the time series of type list (default)
2. D, the first order differential sequence of X (if D is provided,
recommended to speed up)
In case 1, D is computed using Numpy's difference function.
To speed up, it is recommended to compute D before calling this function
because D may also be used by other functions whereas computing it here
again will slow down.
"""
if D is None:
D = numpy.diff(X)
D = D.tolist()
N_delta = 0 # number of sign changes in derivative of the signal
for i in range(1, len(D)):
if D[i] * D[i - 1] < 0:
N_delta += 1
n = len(X)
    return numpy.log10(n) / (
        numpy.log10(n) + numpy.log10(n / (n + 0.4 * N_delta))
    )
cases below:
1. X, the time series of type list (default)
2. D, the first order differential sequence of X (if D is provided,
recommended to speed up)
In case 1, D is computed using Numpy's difference function.
To speed up, it is recommended to compute D before calling this function
because D may also be used by other functions whereas computing it here
again will slow down. | entailment |
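# Illustrative usage sketch: Petrosian fractal dimension of white noise versus a
# smooth sine wave. The noisier signal has more sign changes in its derivative and
# therefore a larger PFD; the exact values depend on the random seed.
import numpy
noise = numpy.random.randn(1024)
smooth = numpy.sin(numpy.linspace(0, 8 * numpy.pi, 1024))
print(pfd(noise), pfd(smooth))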
def hfd(X, Kmax):
""" Compute Hjorth Fractal Dimension of a time series X, kmax
is an HFD parameter
"""
L = []
x = []
N = len(X)
for k in range(1, Kmax):
Lk = []
for m in range(0, k):
Lmk = 0
for i in range(1, int(numpy.floor((N - m) / k))):
Lmk += abs(X[m + i * k] - X[m + i * k - k])
Lmk = Lmk * (N - 1) / numpy.floor((N - m) / float(k)) / k
Lk.append(Lmk)
L.append(numpy.log(numpy.mean(Lk)))
x.append([numpy.log(float(1) / k), 1])
(p, r1, r2, s) = numpy.linalg.lstsq(x, L)
    return p[0] | Compute Higuchi Fractal Dimension of a time series X, Kmax
is an HFD parameter | entailment |
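# Illustrative usage sketch: Higuchi fractal dimension of white noise with Kmax = 8.
# Kmax = 8 is an arbitrary choice here; larger values average over more scales.
import numpy
print(hfd(numpy.random.randn(1024), 8))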
def hjorth(X, D=None):
""" Compute Hjorth mobility and complexity of a time series from either two
cases below:
1. X, the time series of type list (default)
2. D, a first order differential sequence of X (if D is provided,
recommended to speed up)
In case 1, D is computed using Numpy's Difference function.
Notes
-----
To speed up, it is recommended to compute D before calling this function
because D may also be used by other functions whereas computing it here
again will slow down.
Parameters
----------
X
list
a time series
D
list
first order differential sequence of a time series
Returns
-------
As indicated in return line
Hjorth mobility and complexity
"""
if D is None:
D = numpy.diff(X)
D = D.tolist()
D.insert(0, X[0]) # pad the first difference
D = numpy.array(D)
n = len(X)
M2 = float(sum(D ** 2)) / n
TP = sum(numpy.array(X) ** 2)
M4 = 0
for i in range(1, len(D)):
M4 += (D[i] - D[i - 1]) ** 2
M4 = M4 / n
return numpy.sqrt(M2 / TP), numpy.sqrt(
float(M4) * TP / M2 / M2
) | Compute Hjorth mobility and complexity of a time series from either two
cases below:
1. X, the time series of type list (default)
2. D, a first order differential sequence of X (if D is provided,
recommended to speed up)
In case 1, D is computed using Numpy's Difference function.
Notes
-----
To speed up, it is recommended to compute D before calling this function
because D may also be used by other functions whereas computing it here
again will slow down.
Parameters
----------
X
list
a time series
D
list
first order differential sequence of a time series
Returns
-------
As indicated in return line
Hjorth mobility and complexity | entailment |
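# Illustrative usage sketch: Hjorth mobility and complexity of a noisy sine wave.
# Precomputing the first-order difference D is optional, as described above.
import numpy
X = numpy.sin(numpy.linspace(0, 20 * numpy.pi, 2000)) + 0.1 * numpy.random.randn(2000)
mobility, complexity = hjorth(X)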
def spectral_entropy(X, Band, Fs, Power_Ratio=None):
"""Compute spectral entropy of a time series from either two cases below:
1. X, the time series (default)
2. Power_Ratio, a list of normalized signal power in a set of frequency
bins defined in Band (if Power_Ratio is provided, recommended to speed up)
In case 1, Power_Ratio is computed by bin_power() function.
Notes
-----
To speed up, it is recommended to compute Power_Ratio before calling this
function because it may also be used by other functions whereas computing
it here again will slow down.
Parameters
----------
Band
list
boundary frequencies (in Hz) of bins. They can be unequal bins, e.g.
[0.5,4,7,12,30] which are delta, theta, alpha and beta respectively.
You can also use range() function of Python to generate equal bins and
pass the generated list to this function.
Each element of Band is a physical frequency and shall not exceed the
Nyquist frequency, i.e., half of sampling frequency.
X
list
a 1-D real time series.
Fs
integer
the sampling rate in physical frequency
Returns
-------
As indicated in return line
See Also
--------
bin_power: pyeeg function that computes spectral power in frequency bins
"""
if Power_Ratio is None:
Power, Power_Ratio = bin_power(X, Band, Fs)
Spectral_Entropy = 0
for i in range(0, len(Power_Ratio) - 1):
Spectral_Entropy += Power_Ratio[i] * numpy.log(Power_Ratio[i])
Spectral_Entropy /= numpy.log(
len(Power_Ratio)
) # to save time, minus one is omitted
return -1 * Spectral_Entropy | Compute spectral entropy of a time series from either two cases below:
1. X, the time series (default)
2. Power_Ratio, a list of normalized signal power in a set of frequency
bins defined in Band (if Power_Ratio is provided, recommended to speed up)
In case 1, Power_Ratio is computed by bin_power() function.
Notes
-----
To speed up, it is recommended to compute Power_Ratio before calling this
function because it may also be used by other functions whereas computing
it here again will slow down.
Parameters
----------
Band
list
boundary frequencies (in Hz) of bins. They can be unequal bins, e.g.
[0.5,4,7,12,30] which are delta, theta, alpha and beta respectively.
You can also use range() function of Python to generate equal bins and
pass the generated list to this function.
Each element of Band is a physical frequency and shall not exceed the
Nyquist frequency, i.e., half of sampling frequency.
X
list
a 1-D real time series.
Fs
integer
the sampling rate in physical frequency
Returns
-------
As indicated in return line
See Also
--------
bin_power: pyeeg function that computes spectral power in frequency bins | entailment |
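# Illustrative usage sketch: spectral entropy over the same band edges as in the
# bin_power example; passing a precomputed Power_Ratio avoids repeating the FFT.
import numpy
Fs = 256
X = numpy.random.randn(4 * Fs)
Band = [0.5, 4, 7, 12, 30]
Power, Power_Ratio = bin_power(X, Band, Fs)
H = spectral_entropy(X, Band, Fs, Power_Ratio=Power_Ratio)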
def svd_entropy(X, Tau, DE, W=None):
"""Compute SVD Entropy from either two cases below:
1. a time series X, with lag tau and embedding dimension dE (default)
2. a list, W, of normalized singular values of a matrix (if W is provided,
recommend to speed up.)
If W is None, the function will do as follows to prepare singular spectrum:
    First, compute an embedding matrix from X, Tau and DE using pyeeg
function embed_seq():
M = embed_seq(X, Tau, DE)
Second, use scipy.linalg function svd to decompose the embedding matrix
M and obtain a list of singular values:
W = svd(M, compute_uv=0)
At last, normalize W:
W /= sum(W)
Notes
-------------
To speed up, it is recommended to compute W before calling this function
because W may also be used by other functions whereas computing it here
again will slow down.
"""
if W is None:
Y = embed_seq(X, Tau, DE)
W = numpy.linalg.svd(Y, compute_uv=0)
W /= sum(W) # normalize singular values
return -1 * sum(W * numpy.log(W)) | Compute SVD Entropy from either two cases below:
1. a time series X, with lag tau and embedding dimension dE (default)
2. a list, W, of normalized singular values of a matrix (if W is provided,
recommend to speed up.)
If W is None, the function will do as follows to prepare singular spectrum:
First, compute an embedding matrix from X, Tau and DE using pyeeg
function embed_seq():
M = embed_seq(X, Tau, DE)
Second, use scipy.linalg function svd to decompose the embedding matrix
M and obtain a list of singular values:
W = svd(M, compute_uv=0)
At last, normalize W:
W /= sum(W)
Notes
-------------
To speed up, it is recommended to compute W before calling this function
because W may also be used by other functions whereas computing it here
again will slow down. | entailment |
def ap_entropy(X, M, R):
"""Computer approximate entropy (ApEN) of series X, specified by M and R.
Suppose given time series is X = [x(1), x(2), ... , x(N)]. We first build
embedding matrix Em, of dimension (N-M+1)-by-M, such that the i-th row of
Em is x(i),x(i+1), ... , x(i+M-1). Hence, the embedding lag and dimension
are 1 and M-1 respectively. Such a matrix can be built by calling pyeeg
function as Em = embed_seq(X, 1, M). Then we build matrix Emp, whose only
difference with Em is that the length of each embedding sequence is M + 1
Denote the i-th and j-th row of Em as Em[i] and Em[j]. Their k-th elements
are Em[i][k] and Em[j][k] respectively. The distance between Em[i] and
Em[j] is defined as 1) the maximum difference of their corresponding scalar
components, thus, max(Em[i]-Em[j]), or 2) Euclidean distance. We say two
1-D vectors Em[i] and Em[j] *match* in *tolerance* R, if the distance
between them is no greater than R, thus, max(Em[i]-Em[j]) <= R. Mostly, the
value of R is defined as 20% - 30% of standard deviation of X.
Pick Em[i] as a template, for all j such that 0 < j < N - M + 1, we can
check whether Em[j] matches with Em[i]. Denote the number of Em[j],
which is in the range of Em[i], as k[i], which is the i-th element of the
vector k. The probability that a random row in Em matches Em[i] is
    \Sigma_1^{N-M+1} k[i] / (N - M + 1), thus sum(k) / (N - M + 1),
denoted as Cm[i].
We repeat the same process on Emp and obtained Cmp[i], but here 0<i<N-M
since the length of each sequence in Emp is M + 1.
The probability that any two embedding sequences in Em match is then
sum(Cm)/ (N - M +1 ). We define Phi_m = sum(log(Cm)) / (N - M + 1) and
Phi_mp = sum(log(Cmp)) / (N - M ).
And the ApEn is defined as Phi_m - Phi_mp.
Notes
-----
Please be aware that self-match is also counted in ApEn.
References
----------
Costa M, Goldberger AL, Peng CK, Multiscale entropy analysis of biological
signals, Physical Review E, 71:021906, 2005
See also
--------
samp_entropy: sample entropy of a time series
"""
N = len(X)
Em = embed_seq(X, 1, M)
A = numpy.tile(Em, (len(Em), 1, 1))
B = numpy.transpose(A, [1, 0, 2])
D = numpy.abs(A - B) # D[i,j,k] = |Em[i][k] - Em[j][k]|
InRange = numpy.max(D, axis=2) <= R
Cm = InRange.mean(axis=0) # Probability that random M-sequences are in range
# M+1-sequences in range iff M-sequences are in range & last values are close
Dp = numpy.abs(numpy.tile(X[M:], (N - M, 1)) - numpy.tile(X[M:], (N - M, 1)).T)
Cmp = numpy.logical_and(Dp <= R, InRange[:-1, :-1]).mean(axis=0)
# Uncomment for old (miscounted) version
#Cm += 1 / (N - M +1); Cm[-1] -= 1 / (N - M + 1)
#Cmp += 1 / (N - M)
Phi_m, Phi_mp = numpy.sum(numpy.log(Cm)), numpy.sum(numpy.log(Cmp))
Ap_En = (Phi_m - Phi_mp) / (N - M)
    return Ap_En | Compute approximate entropy (ApEn) of series X, specified by M and R.
Suppose given time series is X = [x(1), x(2), ... , x(N)]. We first build
embedding matrix Em, of dimension (N-M+1)-by-M, such that the i-th row of
Em is x(i),x(i+1), ... , x(i+M-1). Hence, the embedding lag and dimension
are 1 and M-1 respectively. Such a matrix can be built by calling pyeeg
function as Em = embed_seq(X, 1, M). Then we build matrix Emp, whose only
difference with Em is that the length of each embedding sequence is M + 1
Denote the i-th and j-th row of Em as Em[i] and Em[j]. Their k-th elements
are Em[i][k] and Em[j][k] respectively. The distance between Em[i] and
Em[j] is defined as 1) the maximum difference of their corresponding scalar
components, thus, max(Em[i]-Em[j]), or 2) Euclidean distance. We say two
1-D vectors Em[i] and Em[j] *match* in *tolerance* R, if the distance
between them is no greater than R, thus, max(Em[i]-Em[j]) <= R. Mostly, the
value of R is defined as 20% - 30% of standard deviation of X.
Pick Em[i] as a template, for all j such that 0 < j < N - M + 1, we can
check whether Em[j] matches with Em[i]. Denote the number of Em[j],
which is in the range of Em[i], as k[i], which is the i-th element of the
vector k. The probability that a random row in Em matches Em[i] is
\Sigma_1^{N-M+1} k[i] / (N - M + 1), thus sum(k) / (N - M + 1),
denoted as Cm[i].
We repeat the same process on Emp and obtained Cmp[i], but here 0<i<N-M
since the length of each sequence in Emp is M + 1.
The probability that any two embedding sequences in Em match is then
sum(Cm)/ (N - M +1 ). We define Phi_m = sum(log(Cm)) / (N - M + 1) and
Phi_mp = sum(log(Cmp)) / (N - M ).
And the ApEn is defined as Phi_m - Phi_mp.
Notes
-----
Please be aware that self-match is also counted in ApEn.
References
----------
Costa M, Goldberger AL, Peng CK, Multiscale entropy analysis of biological
signals, Physical Review E, 71:021906, 2005
See also
--------
samp_entropy: sample entropy of a time series | entailment |
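# Illustrative usage sketch: approximate entropy with the tolerance R set to 20% of
# the standard deviation of X, as the docstring suggests, and M = 2, a common
# embedding choice. The data here are arbitrary white noise.
import numpy
X = numpy.random.randn(500)
print(ap_entropy(X, 2, 0.2 * numpy.std(X)))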
def samp_entropy(X, M, R):
"""Computer sample entropy (SampEn) of series X, specified by M and R.
SampEn is very close to ApEn.
Suppose given time series is X = [x(1), x(2), ... , x(N)]. We first build
embedding matrix Em, of dimension (N-M+1)-by-M, such that the i-th row of
Em is x(i),x(i+1), ... , x(i+M-1). Hence, the embedding lag and dimension
are 1 and M-1 respectively. Such a matrix can be built by calling pyeeg
function as Em = embed_seq(X, 1, M). Then we build matrix Emp, whose only
difference with Em is that the length of each embedding sequence is M + 1
Denote the i-th and j-th row of Em as Em[i] and Em[j]. Their k-th elements
are Em[i][k] and Em[j][k] respectively. The distance between Em[i] and
Em[j] is defined as 1) the maximum difference of their corresponding scalar
components, thus, max(Em[i]-Em[j]), or 2) Euclidean distance. We say two
1-D vectors Em[i] and Em[j] *match* in *tolerance* R, if the distance
between them is no greater than R, thus, max(Em[i]-Em[j]) <= R. Mostly, the
value of R is defined as 20% - 30% of standard deviation of X.
Pick Em[i] as a template, for all j such that 0 < j < N - M , we can
check whether Em[j] matches with Em[i]. Denote the number of Em[j],
which is in the range of Em[i], as k[i], which is the i-th element of the
vector k.
We repeat the same process on Emp and obtained Cmp[i], 0 < i < N - M.
The SampEn is defined as log(sum(Cm)/sum(Cmp))
References
----------
Costa M, Goldberger AL, Peng C-K, Multiscale entropy analysis of biological
signals, Physical Review E, 71:021906, 2005
See also
--------
ap_entropy: approximate entropy of a time series
"""
N = len(X)
Em = embed_seq(X, 1, M)
A = numpy.tile(Em, (len(Em), 1, 1))
B = numpy.transpose(A, [1, 0, 2])
D = numpy.abs(A - B) # D[i,j,k] = |Em[i][k] - Em[j][k]|
InRange = numpy.max(D, axis=2) <= R
numpy.fill_diagonal(InRange, 0) # Don't count self-matches
Cm = InRange.sum(axis=0) # Probability that random M-sequences are in range
Dp = numpy.abs(numpy.tile(X[M:], (N - M, 1)) - numpy.tile(X[M:], (N - M, 1)).T)
Cmp = numpy.logical_and(Dp <= R, InRange[:-1,:-1]).sum(axis=0)
# Uncomment below for old (miscounted) version
#InRange[numpy.triu_indices(len(InRange))] = 0
#InRange = InRange[:-1,:-2]
#Cm = InRange.sum(axis=0) # Probability that random M-sequences are in range
#Dp = numpy.abs(numpy.tile(X[M:], (N - M, 1)) - numpy.tile(X[M:], (N - M, 1)).T)
#Dp = Dp[:,:-1]
#Cmp = numpy.logical_and(Dp <= R, InRange).sum(axis=0)
# Avoid taking log(0)
Samp_En = numpy.log(numpy.sum(Cm + 1e-100) / numpy.sum(Cmp + 1e-100))
    return Samp_En | Compute sample entropy (SampEn) of series X, specified by M and R.
SampEn is very close to ApEn.
Suppose given time series is X = [x(1), x(2), ... , x(N)]. We first build
embedding matrix Em, of dimension (N-M+1)-by-M, such that the i-th row of
Em is x(i),x(i+1), ... , x(i+M-1). Hence, the embedding lag and dimension
are 1 and M-1 respectively. Such a matrix can be built by calling pyeeg
function as Em = embed_seq(X, 1, M). Then we build matrix Emp, whose only
difference with Em is that the length of each embedding sequence is M + 1
Denote the i-th and j-th row of Em as Em[i] and Em[j]. Their k-th elements
are Em[i][k] and Em[j][k] respectively. The distance between Em[i] and
Em[j] is defined as 1) the maximum difference of their corresponding scalar
components, thus, max(Em[i]-Em[j]), or 2) Euclidean distance. We say two
1-D vectors Em[i] and Em[j] *match* in *tolerance* R, if the distance
between them is no greater than R, thus, max(Em[i]-Em[j]) <= R. Mostly, the
value of R is defined as 20% - 30% of standard deviation of X.
Pick Em[i] as a template, for all j such that 0 < j < N - M , we can
check whether Em[j] matches with Em[i]. Denote the number of Em[j],
which is in the range of Em[i], as k[i], which is the i-th element of the
vector k.
We repeat the same process on Emp and obtained Cmp[i], 0 < i < N - M.
The SampEn is defined as log(sum(Cm)/sum(Cmp))
References
----------
Costa M, Goldberger AL, Peng C-K, Multiscale entropy analysis of biological
signals, Physical Review E, 71:021906, 2005
See also
--------
ap_entropy: approximate entropy of a time series | entailment |
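# Illustrative usage sketch: sample entropy with the same parameters as the
# ap_entropy example above. SampEn excludes self-matches, so for the same data it
# is typically slightly larger than ApEn.
import numpy
X = numpy.random.randn(500)
print(samp_entropy(X, 2, 0.2 * numpy.std(X)))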
def dfa(X, Ave=None, L=None):
"""Compute Detrended Fluctuation Analysis from a time series X and length of
boxes L.
The first step to compute DFA is to integrate the signal. Let original
series be X= [x(1), x(2), ..., x(N)].
The integrated signal Y = [y(1), y(2), ..., y(N)] is obtained as follows
y(k) = \sum_{i=1}^{k}{x(i)-Ave} where Ave is the mean of X.
The second step is to partition/slice/segment the integrated sequence Y
into boxes. At least two boxes are needed for computing DFA. Box sizes are
specified by the L argument of this function. By default, it is from 1/5 of
signal length to one (x-5)-th of the signal length, where x is the nearest
power of 2 from the length of the signal, i.e., 1/16, 1/32, 1/64, 1/128,
...
In each box, a linear least square fitting is employed on data in the box.
Denote the series on fitted line as Yn. Its k-th elements, yn(k),
corresponds to y(k).
For fitting in each box, there is a residue, the sum of squares of all
offsets, difference between actual points and points on fitted line.
F(n) denotes the square root of average total residue in all boxes when box
length is n, thus
Total_Residue = \sum_{k=1}^{N}{(y(k)-yn(k))}
F(n) = \sqrt(Total_Residue/N)
The computing to F(n) is carried out for every box length n. Therefore, a
relationship between n and F(n) can be obtained. In general, F(n) increases
when n increases.
Finally, the relationship between F(n) and n is analyzed. A least square
fitting is performed between log(F(n)) and log(n). The slope of the fitting
line is the DFA value, denoted as Alpha. To white noise, Alpha should be
0.5. Higher level of signal complexity is related to higher Alpha.
Parameters
----------
X:
1-D Python list or numpy array
a time series
Ave:
integer, optional
The average value of the time series
L:
1-D Python list of integers
A list of box size, integers in ascending order
Returns
-------
Alpha:
integer
the result of DFA analysis, thus the slope of fitting line of log(F(n))
        vs. log(n), where n is the box size.
Examples
--------
>>> import pyeeg
>>> from numpy.random import randn
>>> print(pyeeg.dfa(randn(4096)))
0.490035110345
Reference
---------
Peng C-K, Havlin S, Stanley HE, Goldberger AL. Quantification of scaling
exponents and crossover phenomena in nonstationary heartbeat time series.
_Chaos_ 1995;5:82-87
Notes
-----
This value depends on the box sizes very much. When the input is a white
noise, this value should be 0.5. But, some choices on box sizes can lead to
the value lower or higher than 0.5, e.g. 0.38 or 0.58.
    Based on many tests, I set the box sizes from 1/5 of signal length to one
(x-5)-th of the signal length, where x is the nearest power of 2 from the
length of the signal, i.e., 1/16, 1/32, 1/64, 1/128, ...
You may generate a list of box sizes and pass in such a list as a
parameter.
"""
X = numpy.array(X)
if Ave is None:
Ave = numpy.mean(X)
    Y = numpy.cumsum(X - Ave)
if L is None:
L = numpy.floor(len(X) * 1 / (
2 ** numpy.array(list(range(4, int(numpy.log2(len(X))) - 4))))
)
F = numpy.zeros(len(L)) # F(n) of different given box length n
for i in range(0, len(L)):
n = int(L[i]) # for each box length L[i]
if n == 0:
print("time series is too short while the box length is too big")
print("abort")
exit()
for j in range(0, len(X), n): # for each box
if j + n < len(X):
c = list(range(j, j + n))
# coordinates of time in the box
c = numpy.vstack([c, numpy.ones(n)]).T
# the value of data in the box
y = Y[j:j + n]
# add residue in this box
F[i] += numpy.linalg.lstsq(c, y)[1]
F[i] /= ((len(X) / n) * n)
F = numpy.sqrt(F)
Alpha = numpy.linalg.lstsq(numpy.vstack(
[numpy.log(L), numpy.ones(len(L))]
).T, numpy.log(F))[0][0]
return Alpha | Compute Detrended Fluctuation Analysis from a time series X and length of
boxes L.
The first step to compute DFA is to integrate the signal. Let original
series be X= [x(1), x(2), ..., x(N)].
The integrated signal Y = [y(1), y(2), ..., y(N)] is obtained as follows
y(k) = \sum_{i=1}^{k}{x(i)-Ave} where Ave is the mean of X.
The second step is to partition/slice/segment the integrated sequence Y
into boxes. At least two boxes are needed for computing DFA. Box sizes are
specified by the L argument of this function. By default, it is from 1/5 of
signal length to one (x-5)-th of the signal length, where x is the nearest
power of 2 from the length of the signal, i.e., 1/16, 1/32, 1/64, 1/128,
...
In each box, a linear least square fitting is employed on data in the box.
Denote the series on fitted line as Yn. Its k-th elements, yn(k),
corresponds to y(k).
For fitting in each box, there is a residue, the sum of squares of all
offsets, difference between actual points and points on fitted line.
F(n) denotes the square root of average total residue in all boxes when box
length is n, thus
Total_Residue = \sum_{k=1}^{N}{(y(k)-yn(k))}
F(n) = \sqrt(Total_Residue/N)
The computing to F(n) is carried out for every box length n. Therefore, a
relationship between n and F(n) can be obtained. In general, F(n) increases
when n increases.
Finally, the relationship between F(n) and n is analyzed. A least square
fitting is performed between log(F(n)) and log(n). The slope of the fitting
line is the DFA value, denoted as Alpha. To white noise, Alpha should be
0.5. Higher level of signal complexity is related to higher Alpha.
Parameters
----------
X:
1-D Python list or numpy array
a time series
Ave:
integer, optional
The average value of the time series
L:
1-D Python list of integers
A list of box size, integers in ascending order
Returns
-------
Alpha:
integer
the result of DFA analysis, thus the slope of fitting line of log(F(n))
vs. log(n), where n is the box size.
Examples
--------
>>> import pyeeg
>>> from numpy.random import randn
>>> print(pyeeg.dfa(randn(4096)))
0.490035110345
Reference
---------
Peng C-K, Havlin S, Stanley HE, Goldberger AL. Quantification of scaling
exponents and crossover phenomena in nonstationary heartbeat time series.
_Chaos_ 1995;5:82-87
Notes
-----
This value depends on the box sizes very much. When the input is a white
noise, this value should be 0.5. But, some choices on box sizes can lead to
the value lower or higher than 0.5, e.g. 0.38 or 0.58.
Based on many tests, I set the box sizes from 1/5 of signal length to one
(x-5)-th of the signal length, where x is the nearest power of 2 from the
length of the signal, i.e., 1/16, 1/32, 1/64, 1/128, ...
You may generate a list of box sizes and pass in such a list as a
parameter. | entailment |
def permutation_entropy(x, n, tau):
"""Compute Permutation Entropy of a given time series x, specified by
permutation order n and embedding lag tau.
Parameters
----------
x
list
a time series
n
integer
Permutation order
tau
integer
Embedding lag
Returns
----------
PE
float
permutation entropy
Notes
----------
Suppose the given time series is X =[x(1),x(2),x(3),...,x(N)].
We first build embedding matrix Em, of dimension(n*N-n+1),
such that the ith row of Em is x(i),x(i+1),..x(i+n-1). Hence
the embedding lag and the embedding dimension are 1 and n
respectively. We build this matrix from a given time series,
X, by calling pyEEg function embed_seq(x,1,n).
We then transform each row of the embedding matrix into
a new sequence, comprising a set of integers in range of 0,..,n-1.
The order in which the integers are placed within a row is the
same as those of the original elements:0 is placed where the smallest
element of the row was and n-1 replaces the largest element of the row.
To calculate the Permutation entropy, we calculate the entropy of PeSeq.
In doing so, we count the number of occurrences of each permutation
in PeSeq and write it in a sequence, RankMat. We then use this sequence to
calculate entropy by using Shannon's entropy formula.
Permutation entropy is usually calculated with n in range of 3 and 7.
References
----------
Bandt, Christoph, and Bernd Pompe. "Permutation entropy: a natural
complexity measure for time series." Physical Review Letters 88.17
(2002): 174102.
Examples
----------
>>> import pyeeg
>>> x = [1,2,4,5,12,3,4,5]
>>> pyeeg.permutation_entropy(x,5,1)
2.0
"""
PeSeq = []
Em = embed_seq(x, tau, n)
for i in range(0, len(Em)):
r = []
z = []
for j in range(0, len(Em[i])):
z.append(Em[i][j])
for j in range(0, len(Em[i])):
z.sort()
r.append(z.index(Em[i][j]))
z[z.index(Em[i][j])] = -1
PeSeq.append(r)
RankMat = []
while len(PeSeq) > 0:
RankMat.append(PeSeq.count(PeSeq[0]))
x = PeSeq[0]
for j in range(0, PeSeq.count(PeSeq[0])):
PeSeq.pop(PeSeq.index(x))
RankMat = numpy.array(RankMat)
RankMat = numpy.true_divide(RankMat, RankMat.sum())
EntropyMat = numpy.multiply(numpy.log2(RankMat), RankMat)
PE = -1 * EntropyMat.sum()
return PE | Compute Permutation Entropy of a given time series x, specified by
permutation order n and embedding lag tau.
Parameters
----------
x
list
a time series
n
integer
Permutation order
tau
integer
Embedding lag
Returns
----------
PE
float
permutation entropy
Notes
----------
Suppose the given time series is X =[x(1),x(2),x(3),...,x(N)].
We first build embedding matrix Em, of dimension(n*N-n+1),
such that the ith row of Em is x(i),x(i+1),..x(i+n-1). Hence
the embedding lag and the embedding dimension are 1 and n
respectively. We build this matrix from a given time series,
X, by calling pyEEg function embed_seq(x,1,n).
We then transform each row of the embedding matrix into
a new sequence, comprising a set of integers in range of 0,..,n-1.
The order in which the integers are placed within a row is the
same as those of the original elements:0 is placed where the smallest
element of the row was and n-1 replaces the largest element of the row.
To calculate the Permutation entropy, we calculate the entropy of PeSeq.
In doing so, we count the number of occurrences of each permutation
in PeSeq and write it in a sequence, RankMat. We then use this sequence to
calculate entropy by using Shannon's entropy formula.
Permutation entropy is usually calculated with n in range of 3 and 7.
References
----------
Bandt, Christoph, and Bernd Pompe. "Permutation entropy: a natural
complexity measure for time series." Physical Review Letters 88.17
(2002): 174102.
Examples
----------
>>> import pyeeg
>>> x = [1,2,4,5,12,3,4,5]
>>> pyeeg.permutation_entropy(x,5,1)
2.0 | entailment |
def information_based_similarity(x, y, n):
"""Calculates the information based similarity of two time series x
and y.
Parameters
----------
x
list
a time series
y
list
a time series
n
integer
word order
Returns
----------
IBS
float
Information based similarity
Notes
----------
Information based similarity is a measure of dissimilarity between
two time series. Let the sequences be x and y. Each sequence is first
replaced by its first ordered difference(Encoder). Calculating the
Heaviside of the resulting sequences, we get two binary sequences,
SymbolicSeq. Using PyEEG function, embed_seq, with lag of 1 and dimension
of n, we build an embedding matrix from the latter sequence.
Each row of this embedding matrix is called a word. Information based
similarity measures the distance between two sequence by comparing the
rank of words in the sequences; more explicitly, the distance, D, is
calculated using the formula:
"1/2^(n-1) * sum( abs(Rank(0)(k)-R(1)(k)) * F(k) )" where Rank(0)(k)
and Rank(1)(k) are the rank of the k-th word in each of the input
sequences. F(k) is a modified "shannon" weighing function that increases
the weight of each word in the calculations when they are more frequent in
the sequences.
    It is advisable to calculate IBS for numerical sequences using 8-tuple
words.
References
----------
Yang AC, Hseu SS, Yien HW, Goldberger AL, Peng CK: Linguistic analysis of
the human heartbeat using frequency and rank order statistics. Phys Rev
Lett 2003, 90: 108103
Examples
----------
>>> import pyeeg
>>> from numpy.random import randn
>>> x = randn(100)
>>> y = randn(100)
>>> pyeeg.information_based_similarity(x,y,8)
0.64512947848249214
"""
Wordlist = []
Space = [[0, 0], [0, 1], [1, 0], [1, 1]]
Sample = [0, 1]
if (n == 1):
Wordlist = Sample
if (n == 2):
Wordlist = Space
elif (n > 1):
Wordlist = Space
Buff = []
for k in range(0, n - 2):
Buff = []
for i in range(0, len(Wordlist)):
Buff.append(tuple(Wordlist[i]))
Buff = tuple(Buff)
Wordlist = []
for i in range(0, len(Buff)):
for j in range(0, len(Sample)):
Wordlist.append(list(Buff[i]))
Wordlist[len(Wordlist) - 1].append(Sample[j])
Wordlist.sort()
Input = [[], []]
Input[0] = x
Input[1] = y
SymbolicSeq = [[], []]
for i in range(0, 2):
Encoder = numpy.diff(Input[i])
for j in range(0, len(Input[i]) - 1):
if(Encoder[j] > 0):
SymbolicSeq[i].append(1)
else:
SymbolicSeq[i].append(0)
Wm = []
Wm.append(embed_seq(SymbolicSeq[0], 1, n).tolist())
Wm.append(embed_seq(SymbolicSeq[1], 1, n).tolist())
Count = [[], []]
for i in range(0, 2):
for k in range(0, len(Wordlist)):
Count[i].append(Wm[i].count(Wordlist[k]))
Prob = [[], []]
for i in range(0, 2):
Sigma = 0
for j in range(0, len(Wordlist)):
Sigma += Count[i][j]
for k in range(0, len(Wordlist)):
Prob[i].append(numpy.true_divide(Count[i][k], Sigma))
Entropy = [[], []]
for i in range(0, 2):
for k in range(0, len(Wordlist)):
if (Prob[i][k] == 0):
Entropy[i].append(0)
else:
Entropy[i].append(Prob[i][k] * (numpy.log2(Prob[i][k])))
Rank = [[], []]
Buff = [[], []]
Buff[0] = tuple(Count[0])
Buff[1] = tuple(Count[1])
for i in range(0, 2):
Count[i].sort()
Count[i].reverse()
for k in range(0, len(Wordlist)):
Rank[i].append(Count[i].index(Buff[i][k]))
Count[i][Count[i].index(Buff[i][k])] = -1
IBS = 0
Z = 0
n = 0
for k in range(0, len(Wordlist)):
if ((Buff[0][k] != 0) & (Buff[1][k] != 0)):
F = -Entropy[0][k] - Entropy[1][k]
IBS += numpy.multiply(numpy.absolute(Rank[0][k] - Rank[1][k]), F)
Z += F
else:
n += 1
IBS = numpy.true_divide(IBS, Z)
IBS = numpy.true_divide(IBS, len(Wordlist) - n)
return IBS | Calculates the information based similarity of two time series x
and y.
Parameters
----------
x
list
a time series
y
list
a time series
n
integer
word order
Returns
----------
IBS
float
Information based similarity
Notes
----------
Information based similarity is a measure of dissimilarity between
two time series. Let the sequences be x and y. Each sequence is first
replaced by its first ordered difference(Encoder). Calculating the
Heaviside of the resulting sequences, we get two binary sequences,
SymbolicSeq. Using PyEEG function, embed_seq, with lag of 1 and dimension
of n, we build an embedding matrix from the latter sequence.
Each row of this embedding matrix is called a word. Information based
similarity measures the distance between two sequence by comparing the
rank of words in the sequences; more explicitly, the distance, D, is
calculated using the formula:
"1/2^(n-1) * sum( abs(Rank(0)(k)-R(1)(k)) * F(k) )" where Rank(0)(k)
and Rank(1)(k) are the rank of the k-th word in each of the input
sequences. F(k) is a modified "shannon" weighing function that increases
the weight of each word in the calculations when they are more frequent in
the sequences.
It is advisable to calculate IBS for numerical sequences using 8-tuple
words.
References
----------
Yang AC, Hseu SS, Yien HW, Goldberger AL, Peng CK: Linguistic analysis of
the human heartbeat using frequency and rank order statistics. Phys Rev
Lett 2003, 90: 108103
Examples
----------
>>> import pyeeg
>>> from numpy.random import randn
>>> x = randn(100)
>>> y = randn(100)
>>> pyeeg.information_based_similarity(x,y,8)
0.64512947848249214 | entailment |
def LLE(x, tau, n, T, fs):
"""Calculate largest Lyauponov exponent of a given time series x using
Rosenstein algorithm.
Parameters
----------
x
list
a time series
n
integer
embedding dimension
tau
integer
Embedding lag
fs
integer
Sampling frequency
T
integer
Mean period
Returns
----------
Lexp
float
Largest Lyapunov Exponent
Notes
----------
A n-dimensional trajectory is first reconstructed from the observed data by
use of embedding delay of tau, using pyeeg function, embed_seq(x, tau, n).
Algorithm then searches for nearest neighbour of each point on the
reconstructed trajectory; temporal separation of nearest neighbours must be
greater than mean period of the time series: the mean period can be
estimated as the reciprocal of the mean frequency in power spectrum
Each pair of nearest neighbours is assumed to diverge exponentially at a
rate given by largest Lyapunov exponent. Now having a collection of
neighbours, a least square fit to the average exponential divergence is
calculated. The slope of this line gives an accurate estimate of the
largest Lyapunov exponent.
References
----------
Rosenstein, Michael T., James J. Collins, and Carlo J. De Luca. "A
practical method for calculating largest Lyapunov exponents from small data
sets." Physica D: Nonlinear Phenomena 65.1 (1993): 117-134.
Examples
----------
>>> import pyeeg
>>> X = numpy.array([3,4,1,2,4,51,4,32,24,12,3,45])
>>> pyeeg.LLE(X,2,4,1,1)
>>> 0.18771136179353307
"""
Em = embed_seq(x, tau, n)
M = len(Em)
A = numpy.tile(Em, (len(Em), 1, 1))
B = numpy.transpose(A, [1, 0, 2])
square_dists = (A - B) ** 2 # square_dists[i,j,k] = (Em[i][k]-Em[j][k])^2
D = numpy.sqrt(square_dists[:,:,:].sum(axis=2)) # D[i,j] = ||Em[i]-Em[j]||_2
# Exclude elements within T of the diagonal
band = numpy.tri(D.shape[0], k=T) - numpy.tri(D.shape[0], k=-T-1)
band[band == 1] = numpy.inf
neighbors = (D + band).argmin(axis=0) # nearest neighbors more than T steps away
# in_bounds[i,j] = (i+j <= M-1 and i+neighbors[j] <= M-1)
inc = numpy.tile(numpy.arange(M), (M, 1))
row_inds = (numpy.tile(numpy.arange(M), (M, 1)).T + inc)
col_inds = (numpy.tile(neighbors, (M, 1)) + inc.T)
in_bounds = numpy.logical_and(row_inds <= M - 1, col_inds <= M - 1)
# Uncomment for old (miscounted) version
#in_bounds = numpy.logical_and(row_inds < M - 1, col_inds < M - 1)
    row_inds[~in_bounds] = 0
    col_inds[~in_bounds] = 0
# neighbor_dists[i,j] = ||Em[i+j]-Em[i+neighbors[j]]||_2
    neighbor_dists = numpy.ma.MaskedArray(D[row_inds, col_inds], ~in_bounds)
    J = (~neighbor_dists.mask).sum(axis=1)  # number of in-bounds indices by row
# Set invalid (zero) values to 1; log(1) = 0 so sum is unchanged
neighbor_dists[neighbor_dists == 0] = 1
d_ij = numpy.sum(numpy.log(neighbor_dists.data), axis=1)
mean_d = d_ij[J > 0] / J[J > 0]
x = numpy.arange(len(mean_d))
X = numpy.vstack((x, numpy.ones(len(mean_d)))).T
[m, c] = numpy.linalg.lstsq(X, mean_d)[0]
Lexp = fs * m
    return Lexp | Calculate the largest Lyapunov exponent of a given time series x using
Rosenstein algorithm.
Parameters
----------
x
list
a time series
n
integer
embedding dimension
tau
integer
Embedding lag
fs
integer
Sampling frequency
T
integer
Mean period
Returns
----------
Lexp
float
Largest Lyapunov Exponent
Notes
----------
A n-dimensional trajectory is first reconstructed from the observed data by
use of embedding delay of tau, using pyeeg function, embed_seq(x, tau, n).
Algorithm then searches for nearest neighbour of each point on the
reconstructed trajectory; temporal separation of nearest neighbours must be
greater than mean period of the time series: the mean period can be
estimated as the reciprocal of the mean frequency in power spectrum
Each pair of nearest neighbours is assumed to diverge exponentially at a
rate given by largest Lyapunov exponent. Now having a collection of
neighbours, a least square fit to the average exponential divergence is
calculated. The slope of this line gives an accurate estimate of the
largest Lyapunov exponent.
References
----------
Rosenstein, Michael T., James J. Collins, and Carlo J. De Luca. "A
practical method for calculating largest Lyapunov exponents from small data
sets." Physica D: Nonlinear Phenomena 65.1 (1993): 117-134.
Examples
----------
>>> import pyeeg
>>> X = numpy.array([3,4,1,2,4,51,4,32,24,12,3,45])
>>> pyeeg.LLE(X,2,4,1,1)
>>> 0.18771136179353307 | entailment |
def mod2pi(ts):
"""For a timeseries where all variables represent phases (in radians),
return an equivalent timeseries where all values are in the range (-pi, pi]
"""
return np.pi - np.mod(np.pi - ts, 2*np.pi) | For a timeseries where all variables represent phases (in radians),
return an equivalent timeseries where all values are in the range (-pi, pi] | entailment |
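# Illustrative usage sketch: an unwrapped phase that has grown beyond 2*pi is mapped
# back into (-pi, pi]. The same expression applies to a plain ndarray.
import numpy as np
unwrapped = np.linspace(0, 6 * np.pi, 7)
wrapped = np.pi - np.mod(np.pi - unwrapped, 2 * np.pi)   # same formula as mod2pi()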
def phase_crossings(ts, phi=0.0):
"""For a single variable timeseries representing the phase of an oscillator,
find the times at which the phase crosses angle phi,
with the condition that the phase must visit phi+pi between crossings.
(Thus if noise causes the phase to wander back and forth across angle phi
without the oscillator doing a full revolution, then this is recorded as
a single crossing event, giving the time of the earliest arrival.)
If the timeseries begins (or ends) exactly at phi, then time zero
(or the ending time) is also included as a crossing event,
so that the boundaries of the first and last oscillations are included.
If the actual crossing time falls between two time steps, linear
interpolation is used to estimate the crossing time.
Arguments:
ts: Timeseries (single variable)
The timeseries of an angle variable (radians)
phi (float): Critical phase angle (radians) at which to report crossings.
Returns:
array of float
"""
#TODO support multivariate time series
ts = ts.squeeze()
    if ts.ndim != 1:
raise ValueError('Currently can only use on single variable timeseries')
# Interpret the timeseries as belonging to a phase variable.
# Map its range to the interval (-pi, pi] with critical angle at zero:
ts = mod2pi(ts - phi)
tsa = ts[0:-1]
tsb = ts[1:]
p2 = np.pi/2
# Time indices where phase crosses or reaches zero from below or above
zc = np.nonzero((tsa > -p2) & (tsa < 0) & (tsb >= 0) & (tsb < p2) |
(tsa < p2) & (tsa > 0) & (tsb <= 0) & (tsb > -p2))[0] + 1
# Estimate crossing time interpolated linearly within a single time step
va = ts[zc-1]
vb = ts[zc]
ct = (np.abs(vb)*ts.tspan[zc-1] +
np.abs(va)*ts.tspan[zc]) / np.abs(vb - va) # denominator always !=0
# Also include starting time if we started exactly at zero
if ts[0] == 0.0:
zc = np.r_[np.array([0]), zc]
ct = np.r_[np.array([ts.tspan[0]]), ct]
# Time indices where phase crosses pi
pc = np.nonzero((tsa > p2) & (tsb < -p2) | (tsa < -p2) & (tsb > p2))[0] + 1
# Select those zero-crossings separated by at least one pi-crossing
splice = np.searchsorted(pc, zc)
which_zc = np.r_[np.array([0]), np.nonzero(splice[0:-1] - splice[1:])[0] +1]
    if ct.shape[0] == 0:
return ct
else:
return ct[which_zc] | For a single variable timeseries representing the phase of an oscillator,
find the times at which the phase crosses angle phi,
with the condition that the phase must visit phi+pi between crossings.
(Thus if noise causes the phase to wander back and forth across angle phi
without the oscillator doing a full revolution, then this is recorded as
a single crossing event, giving the time of the earliest arrival.)
If the timeseries begins (or ends) exactly at phi, then time zero
(or the ending time) is also included as a crossing event,
so that the boundaries of the first and last oscillations are included.
If the actual crossing time falls between two time steps, linear
interpolation is used to estimate the crossing time.
Arguments:
ts: Timeseries (single variable)
The timeseries of an angle variable (radians)
phi (float): Critical phase angle (radians) at which to report crossings.
Returns:
array of float | entailment |