repository_name (string, lengths 5-67) | func_path_in_repository (string, lengths 4-234) | func_name (string, lengths 0-314) | whole_func_string (string, lengths 52-3.87M) | language (6 classes) | func_code_string (string, lengths 52-3.87M) | func_documentation_string (string, lengths 1-47.2k) | func_code_url (string, lengths 85-339)
---|---|---|---|---|---|---|---
thebigmunch/google-music | src/google_music/clients/mobileclient.py | MobileClient.shuffle_song | def shuffle_song(
self, song, *, num_songs=100, only_library=False, recently_played=None
):
"""Get a listing of song shuffle/mix songs.
Parameters:
song (dict): A song dict.
num_songs (int, Optional): The maximum number of songs to return from the station.
Default: ``100``
only_library (bool, Optional): Only return content from library.
Default: False
recently_played (list, Optional): A list of dicts in the form of {'id': '', 'type': ''}
where ``id`` is a song ID and ``type`` is 0 for a library song and 1 for a store song.
Returns:
list: List of song shuffle/mix songs.
"""
station_info = {
'num_entries': num_songs,
'library_content_only': only_library
}
if 'storeId' in song:
station_info['seed'] = {
'trackId': song['storeId'],
'seedType': StationSeedType.store_track.value
}
else:
station_info['seed'] = {
'trackLockerId': song['id'],
'seedType': StationSeedType.library_track.value
}
if recently_played is not None:
station_info['recently_played'] = recently_played
response = self._call(mc_calls.RadioStationFeed, station_infos=[station_info])
station_feed = response.body.get('data', {}).get('stations', [])
try:
station = station_feed[0]
except IndexError:
station = {}
return station.get('tracks', []) | python | def shuffle_song(
self, song, *, num_songs=100, only_library=False, recently_played=None
):
"""Get a listing of song shuffle/mix songs.
Parameters:
song (dict): A song dict.
num_songs (int, Optional): The maximum number of songs to return from the station.
Default: ``100``
only_library (bool, Optional): Only return content from library.
Default: False
recently_played (list, Optional): A list of dicts in the form of {'id': '', 'type': ''}
where ``id`` is a song ID and ``type`` is 0 for a library song and 1 for a store song.
Returns:
list: List of song shuffle/mix songs.
"""
station_info = {
'num_entries': num_songs,
'library_content_only': only_library
}
if 'storeId' in song:
station_info['seed'] = {
'trackId': song['storeId'],
'seedType': StationSeedType.store_track.value
}
else:
station_info['seed'] = {
'trackLockerId': song['id'],
'seedType': StationSeedType.library_track.value
}
if recently_played is not None:
station_info['recently_played'] = recently_played
response = self._call(mc_calls.RadioStationFeed, station_infos=[station_info])
station_feed = response.body.get('data', {}).get('stations', [])
try:
station = station_feed[0]
except IndexError:
station = {}
return station.get('tracks', []) | Get a listing of song shuffle/mix songs.
Parameters:
song (dict): A song dict.
num_songs (int, Optional): The maximum number of songs to return from the station.
Default: ``100``
only_library (bool, Optional): Only return content from library.
Default: False
recently_played (list, Optional): A list of dicts in the form of {'id': '', 'type': ''}
where ``id`` is a song ID and ``type`` is 0 for a library song and 1 for a store song.
Returns:
list: List of song shuffle/mix songs. | https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L1465-L1510 |
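A minimal usage sketch for the row above, assuming an authenticated client and a non-empty library; `'username'` is a placeholder, and the seed song dict comes straight from `songs()`:

```python
import google_music

mc = google_music.mobileclient('username')  # placeholder account name

seed_song = mc.songs()[0]  # any library or store song dict works as a seed
mix = mc.shuffle_song(seed_song, num_songs=25, only_library=True)
print(len(mix), 'songs in the shuffle mix')
```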
thebigmunch/google-music | src/google_music/clients/mobileclient.py | MobileClient.situations | def situations(self, *, tz_offset=None):
"""Get a listing of situations.
Parameters:
tz_offset (int, Optional): A time zone offset from UTC in seconds.
"""
response = self._call(
mc_calls.ListenNowSituations,
tz_offset
)
situation_list = response.body.get('situations', [])
return situation_list | python | def situations(self, *, tz_offset=None):
"""Get a listing of situations.
Parameters:
tz_offset (int, Optional): A time zone offset from UTC in seconds.
"""
response = self._call(
mc_calls.ListenNowSituations,
tz_offset
)
situation_list = response.body.get('situations', [])
return situation_list | Get a listing of situations.
Parameters:
tz_offset (int, Optional): A time zone offset from UTC in seconds. | https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L1512-L1525 |
thebigmunch/google-music | src/google_music/clients/mobileclient.py | MobileClient.song | def song(self, song_id):
"""Get information about a song.
Parameters:
song_id (str): A song ID.
Returns:
dict: Song information.
"""
if song_id.startswith('T'):
song_info = self._call(
mc_calls.FetchTrack,
song_id
).body
else:
song_info = next(
(
song
for song in self.songs()
if song['id'] == song_id
),
None
)
return song_info | python | def song(self, song_id):
"""Get information about a song.
Parameters:
song_id (str): A song ID.
Returns:
dict: Song information.
"""
if song_id.startswith('T'):
song_info = self._call(
mc_calls.FetchTrack,
song_id
).body
else:
song_info = next(
(
song
for song in self.songs()
if song['id'] == song_id
),
None
)
return song_info | Get information about a song.
Parameters:
song_id (str): A song ID.
Returns:
dict: Song information. | https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L1527-L1552 |
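A sketch of how the ID prefix drives the lookup above; `'Tabc123'` is a made-up store ID and `'username'` a placeholder:

```python
import google_music

mc = google_music.mobileclient('username')  # placeholder

# IDs starting with 'T' are store tracks and are fetched directly;
# any other ID is resolved by scanning the full library listing.
store_info = mc.song('Tabc123')             # hypothetical store ID
library_info = mc.song(mc.songs()[0]['id'])
```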
thebigmunch/google-music | src/google_music/clients/mobileclient.py | MobileClient.songs_add | def songs_add(self, songs):
"""Add store songs to your library.
Parameters:
songs (list): A list of store song dicts.
Returns:
list: Songs' library IDs.
"""
mutations = [mc_calls.TrackBatch.add(song) for song in songs]
response = self._call(
mc_calls.TrackBatch,
mutations
)
success_ids = [
res['id']
for res in response.body['mutate_response']
if res['response_code'] == 'OK'
]
return success_ids | python | def songs_add(self, songs):
"""Add store songs to your library.
Parameters:
songs (list): A list of store song dicts.
Returns:
list: Songs' library IDs.
"""
mutations = [mc_calls.TrackBatch.add(song) for song in songs]
response = self._call(
mc_calls.TrackBatch,
mutations
)
success_ids = [
res['id']
for res in response.body['mutate_response']
if res['response_code'] == 'OK'
]
return success_ids | Add store songs to your library.
Parameters:
songs (list): A list of store song dicts.
Returns:
list: Songs' library IDs. | https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L1566-L1588 |
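A hedged sketch: pull a few store tracks from the I'm Feeling Lucky station (see `station()` further down, which returns a dict with a `tracks` list) and add them to the library; `'username'` is a placeholder:

```python
import google_music

mc = google_music.mobileclient('username')  # placeholder

store_songs = mc.station('IFL', num_songs=5).get('tracks', [])
library_ids = mc.songs_add(store_songs)
print(library_ids)
```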
thebigmunch/google-music | src/google_music/clients/mobileclient.py | MobileClient.songs_delete | def songs_delete(self, songs):
"""Delete songs from library.
Parameters:
songs (list): A list of song dicts.
Returns:
list: Successfully deleted song IDs.
"""
mutations = [mc_calls.TrackBatch.delete(song['id']) for song in songs]
response = self._call(
mc_calls.TrackBatch,
mutations
)
success_ids = [
res['id']
for res in response.body['mutate_response']
if res['response_code'] == 'OK'
]
# TODO: Report failures.
# failure_ids = [
# res['id']
# for res in response.body['mutate_response']
# if res['response_code'] != 'OK'
# ]
return success_ids | python | def songs_delete(self, songs):
"""Delete songs from library.
Parameters:
songs (list): A list of song dicts.
Returns:
list: Successfully deleted song IDs.
"""
mutations = [mc_calls.TrackBatch.delete(song['id']) for song in songs]
response = self._call(
mc_calls.TrackBatch,
mutations
)
success_ids = [
res['id']
for res in response.body['mutate_response']
if res['response_code'] == 'OK'
]
# TODO: Report failures.
# failure_ids = [
# res['id']
# for res in response.body['mutate_response']
# if res['response_code'] != 'OK'
# ]
return success_ids | Delete songs from library.
Parameters:
songs (list): A list of song dicts.
Returns:
list: Successfully deleted song IDs. | https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L1602-L1631 |
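A sketch, assuming library song dicts carry an `'artist'` key (as Google Music track dicts do); `'username'` and the artist name are placeholders:

```python
import google_music

mc = google_music.mobileclient('username')  # placeholder

doomed = [s for s in mc.songs() if s.get('artist') == 'Some Artist']
deleted_ids = mc.songs_delete(doomed)
print(len(deleted_ids), 'songs deleted')
```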
thebigmunch/google-music | src/google_music/clients/mobileclient.py | MobileClient.song_play | def song_play(self, song):
"""Add play to song play count.
Parameters:
song (dict): A song dict.
Returns:
bool: ``True`` if successful, ``False`` if not.
"""
if 'storeId' in song:
song_id = song['storeId']
elif 'trackId' in song:
song_id = song['trackId']
else:
song_id = song['id']
song_duration = song['durationMillis']
event = mc_calls.ActivityRecordRealtime.play(song_id, song_duration)
response = self._call(
mc_calls.ActivityRecordRealtime,
event
)
return response.body['eventResults'][0]['code'] == 'OK' | python | def song_play(self, song):
"""Add play to song play count.
Parameters:
song (dict): A song dict.
Returns:
bool: ``True`` if successful, ``False`` if not.
"""
if 'storeId' in song:
song_id = song['storeId']
elif 'trackId' in song:
song_id = song['trackId']
else:
song_id = song['id']
song_duration = song['durationMillis']
event = mc_calls.ActivityRecordRealtime.play(song_id, song_duration)
response = self._call(
mc_calls.ActivityRecordRealtime,
event
)
return response.body['eventResults'][0]['code'] == 'OK' | Add play to song play count.
Parameters:
song (dict): A song dict.
Returns:
bool: ``True`` if successful, ``False`` if not. | https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L1633-L1658 |
thebigmunch/google-music | src/google_music/clients/mobileclient.py | MobileClient.song_rate | def song_rate(self, song, rating):
"""Rate song.
Parameters:
song (dict): A song dict.
rating (int): 0 (not rated), 1 (thumbs down), or 5 (thumbs up).
Returns:
bool: ``True`` if successful, ``False`` if not.
"""
if 'storeId' in song:
song_id = song['storeId']
elif 'trackId' in song:
song_id = song['trackId']
else:
song_id = song['id']
event = mc_calls.ActivityRecordRealtime.rate(song_id, rating)
response = self._call(
mc_calls.ActivityRecordRealtime,
event
)
return response.body['eventResults'][0]['code'] == 'OK' | python | def song_rate(self, song, rating):
"""Rate song.
Parameters:
song (dict): A song dict.
rating (int): 0 (not rated), 1 (thumbs down), or 5 (thumbs up).
Returns:
bool: ``True`` if successful, ``False`` if not.
"""
if 'storeId' in song:
song_id = song['storeId']
elif 'trackId' in song:
song_id = song['trackId']
else:
song_id = song['id']
event = mc_calls.ActivityRecordRealtime.rate(song_id, rating)
response = self._call(
mc_calls.ActivityRecordRealtime,
event
)
return response.body['eventResults'][0]['code'] == 'OK' | Rate song.
Parameters:
song (dict): A song dict.
rating (int): 0 (not rated), 1 (thumbs down), or 5 (thumbs up).
Returns:
bool: ``True`` if successful, ``False`` if not. | https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L1660-L1684 |
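The play and rate calls share the same ID resolution, so a combined sketch covers both rows above (placeholder username, non-empty library assumed):

```python
import google_music

mc = google_music.mobileclient('username')  # placeholder

song = mc.songs()[0]
mc.song_play(song)          # bump the play count (previous row)
ok = mc.song_rate(song, 5)  # 5 = thumbs up, 1 = thumbs down, 0 = clear
print('rated' if ok else 'rating failed')
```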
thebigmunch/google-music | src/google_music/clients/mobileclient.py | MobileClient.songs | def songs(self):
"""Get a listing of library songs.
Returns:
list: Song dicts.
"""
song_list = []
for chunk in self.songs_iter(page_size=49995):
song_list.extend(chunk)
return song_list | python | def songs(self):
"""Get a listing of library songs.
Returns:
list: Song dicts.
"""
song_list = []
for chunk in self.songs_iter(page_size=49995):
song_list.extend(chunk)
return song_list | Get a listing of library songs.
Returns:
list: Song dicts. | https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L1686-L1697 |
thebigmunch/google-music | src/google_music/clients/mobileclient.py | MobileClient.songs_iter | def songs_iter(self, *, page_size=250):
"""Get a paged iterator of library songs.
Parameters:
page_size (int, Optional): The maximum number of results per returned page.
Max allowed is ``49995``.
Default: ``250``
Yields:
list: Song dicts.
"""
start_token = None
while True:
response = self._call(
mc_calls.TrackFeed,
max_results=page_size,
start_token=start_token
)
items = response.body.get('data', {}).get('items', [])
if items:
yield items
start_token = response.body.get('nextPageToken')
if start_token is None:
break | python | def songs_iter(self, *, page_size=250):
"""Get a paged iterator of library songs.
Parameters:
page_size (int, Optional): The maximum number of results per returned page.
Max allowed is ``49995``.
Default: ``250``
Yields:
list: Song dicts.
"""
start_token = None
while True:
response = self._call(
mc_calls.TrackFeed,
max_results=page_size,
start_token=start_token
)
items = response.body.get('data', {}).get('items', [])
if items:
yield items
start_token = response.body.get('nextPageToken')
if start_token is None:
break | Get a paged iterator of library songs.
Parameters:
page_size (int, Optional): The maximum number of results per returned page.
Max allowed is ``49995``.
Default: ``250``
Yields:
list: Song dicts. | https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L1699-L1726 |
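`songs()` above simply drains this iterator at the maximum page size; a sketch of consuming it incrementally instead (placeholder username):

```python
import google_music

mc = google_music.mobileclient('username')  # placeholder

total = 0
for page in mc.songs_iter(page_size=1000):  # each page is a list of song dicts
    total += len(page)
print(total, 'library songs')
```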
thebigmunch/google-music | src/google_music/clients/mobileclient.py | MobileClient.station | def station(self, station_id, *, num_songs=25, recently_played=None):
"""Get information about a station.
Parameters:
station_id (str): A station ID. Use 'IFL' for I'm Feeling Lucky.
num_songs (int, Optional): The maximum number of songs to return from the station.
Default: ``25``
recently_played (list, Optional): A list of dicts in the form of {'id': '', 'type': ''}
where ``id`` is a song ID and ``type`` is 0 for a library song and 1 for a store song.
Returns:
dict: Station information.
"""
station_info = {
'station_id': station_id,
'num_entries': num_songs,
'library_content_only': False
}
if recently_played is not None:
station_info['recently_played'] = recently_played
response = self._call(
mc_calls.RadioStationFeed,
station_infos=[station_info]
)
station_feed = response.body.get('data', {}).get('stations', [])
try:
station = station_feed[0]
except IndexError:
station = {}
return station | python | def station(self, station_id, *, num_songs=25, recently_played=None):
"""Get information about a station.
Parameters:
station_id (str): A station ID. Use 'IFL' for I'm Feeling Lucky.
num_songs (int, Optional): The maximum number of songs to return from the station.
Default: ``25``
recently_played (list, Optional): A list of dicts in the form of {'id': '', 'type': ''}
where ``id`` is a song ID and ``type`` is 0 for a library song and 1 for a store song.
Returns:
dict: Station information.
"""
station_info = {
'station_id': station_id,
'num_entries': num_songs,
'library_content_only': False
}
if recently_played is not None:
station_info['recently_played'] = recently_played
response = self._call(
mc_calls.RadioStationFeed,
station_infos=[station_info]
)
station_feed = response.body.get('data', {}).get('stations', [])
try:
station = station_feed[0]
except IndexError:
station = {}
return station | Get information about a station.
Parameters:
station_id (str): A station ID. Use 'IFL' for I'm Feeling Lucky.
num_songs (int, Optional): The maximum number of songs to return from the station.
Default: ``25``
recently_played (list, Optional): A list of dicts in the form of {'id': '', 'type': ''}
where ``id`` is a song ID and ``type`` is 0 for a library song and 1 for a store song.
Returns:
dict: Station information. | https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L1730-L1764 |
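A sketch of seeding the feed with recently played songs; the store ID in `recent` is made up, and `'username'` is a placeholder:

```python
import google_music

mc = google_music.mobileclient('username')  # placeholder

recent = [{'id': 'Tabc123', 'type': 1}]  # type 1 = store song, 0 = library song
station = mc.station('IFL', num_songs=10, recently_played=recent)
for track in station.get('tracks', []):
    print(track.get('title'))
```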
thebigmunch/google-music | src/google_music/clients/mobileclient.py | MobileClient.station_feed | def station_feed(self, *, num_songs=25, num_stations=4):
"""Generate stations.
Note:
A Google Music subscription is required.
Parameters:
num_songs (int, Optional): The total number of songs to return. Default: ``25``
num_stations (int, Optional): The number of stations to return when no station_infos is provided.
Default: ``4``
Returns:
list: Station information dicts.
"""
response = self._call(
mc_calls.RadioStationFeed,
num_entries=num_songs,
num_stations=num_stations
)
station_feed = response.body.get('data', {}).get('stations', [])
return station_feed | python | def station_feed(self, *, num_songs=25, num_stations=4):
"""Generate stations.
Note:
A Google Music subscription is required.
Parameters:
num_songs (int, Optional): The total number of songs to return. Default: ``25``
num_stations (int, Optional): The number of stations to return when no station_infos is provided.
Default: ``4``
Returns:
list: Station information dicts.
"""
response = self._call(
mc_calls.RadioStationFeed,
num_entries=num_songs,
num_stations=num_stations
)
station_feed = response.body.get('data', {}).get('stations', [])
return station_feed | Generate stations.
Note:
A Google Music subscription is required.
Parameters:
num_songs (int, Optional): The total number of songs to return. Default: ``25``
num_stations (int, Optional): The number of stations to return when no station_infos is provided.
Default: ``4``
Returns:
list: Station information dicts. | https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L1767-L1789 |
thebigmunch/google-music | src/google_music/clients/mobileclient.py | MobileClient.station_songs | def station_songs(self, station, *, num_songs=25, recently_played=None):
"""Get a listing of songs from a station.
Parameters:
station (dict): A station dict.
num_songs (int, Optional): The maximum number of songs to return from the station. Default: ``25``
recently_played (list, Optional): A list of dicts in the form of {'id': '', 'type': ''}
where ``id`` is a song ID and ``type`` is 0 for a library song and 1 for a store song.
Returns:
list: Station song dicts.
"""
station_id = station['id']
station = self.station(
station_id,
num_songs=num_songs,
recently_played=recently_played
)
return station.get('tracks', []) | python | def station_songs(self, station, *, num_songs=25, recently_played=None):
"""Get a listing of songs from a station.
Parameters:
station (dict): A station dict.
num_songs (int, Optional): The maximum number of songs to return from the station. Default: ``25``
recently_played (list, Optional): A list of dicts in the form of {'id': '', 'type': ''}
where ``id`` is a song ID and ``type`` is 0 for a library song and 1 for a store song.
Returns:
list: Station song dicts.
"""
station_id = station['id']
station = self.station(
station_id,
num_songs=num_songs,
recently_played=recently_played
)
return station.get('tracks', []) | Get a listing of songs from a station.
Parameters:
station (dict): A station dict.
num_songs (int, Optional): The maximum number of songs to return from the station. Default: ``25``
recently_played (list, Optional): A list of dicts in the form of {'id': '', 'type': ''}
where ``id`` is a song ID and ``type`` is 0 for a library song and 1 for a store song.
Returns:
list: Station song dicts. | https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L1791-L1812 |
thebigmunch/google-music | src/google_music/clients/mobileclient.py | MobileClient.stations | def stations(self, *, generated=True, library=True):
"""Get a listing of library stations.
The listing can contain stations added to the library and generated from the library.
Parameters:
generated (bool, Optional): Include generated stations.
Default: True
library (bool, Optional): Include library stations.
Default: True
Returns:
list: Station information dicts.
"""
station_list = []
for chunk in self.stations_iter(page_size=49995):
for station in chunk:
if (
(generated and not station.get('inLibrary'))
or (library and station.get('inLibrary'))
):
station_list.append(station)
return station_list | python | def stations(self, *, generated=True, library=True):
"""Get a listing of library stations.
The listing can contain stations added to the library and generated from the library.
Parameters:
generated (bool, Optional): Include generated stations.
Default: True
library (bool, Optional): Include library stations.
Default: True
Returns:
list: Station information dicts.
"""
station_list = []
for chunk in self.stations_iter(page_size=49995):
for station in chunk:
if (
(generated and not station.get('inLibrary'))
or (library and station.get('inLibrary'))
):
station_list.append(station)
return station_list | Get a listing of library stations.
The listing can contain stations added to the library and generated from the library.
Parameters:
generated (bool, Optional): Include generated stations.
Default: True
library (bool, Optional): Include library stations.
Default: True
Returns:
list: Station information dicts. | https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L1814-L1838 |
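The two flags partition the listing on the `inLibrary` field, as the code above shows; a sketch (placeholder username):

```python
import google_music

mc = google_music.mobileclient('username')  # placeholder

library_stations = mc.stations(generated=False)  # stations saved to the library
generated_stations = mc.stations(library=False)  # stations generated from it
print(len(library_stations), len(generated_stations))
```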
thebigmunch/google-music | src/google_music/clients/mobileclient.py | MobileClient.stations_iter | def stations_iter(self, *, page_size=250):
"""Get a paged iterator of library stations.
Parameters:
page_size (int, Optional): The maximum number of results per returned page.
Max allowed is ``49995``.
Default: ``250``
Yields:
list: Station dicts.
"""
start_token = None
while True:
response = self._call(
mc_calls.RadioStation,
max_results=page_size,
start_token=start_token
)
yield response.body.get('data', {}).get('items', [])
start_token = response.body.get('nextPageToken')
if start_token is None:
break | python | def stations_iter(self, *, page_size=250):
"""Get a paged iterator of library stations.
Parameters:
page_size (int, Optional): The maximum number of results per returned page.
Max allowed is ``49995``.
Default: ``250``
Yields:
list: Station dicts.
"""
start_token = None
while True:
response = self._call(
mc_calls.RadioStation,
max_results=page_size,
start_token=start_token
)
yield response.body.get('data', {}).get('items', [])
start_token = response.body.get('nextPageToken')
if start_token is None:
break | Get a paged iterator of library stations.
Parameters:
page_size (int, Optional): The maximum number of results per returned page.
Max allowed is ``49995``.
Default: ``250``
Yields:
list: Station dicts. | https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L1840-L1864 |
thebigmunch/google-music | src/google_music/clients/mobileclient.py | MobileClient.stream | def stream(self, item, *, device_id=None, quality='hi', session_token=None):
"""Get MP3 stream of a podcast episode, library song, station_song, or store song.
Note:
Streaming requires a ``device_id`` from a valid, linked mobile device.
Parameters:
item (dict): A podcast episode, library song, station_song, or store song dict.
A Google Music subscription is required to stream store songs.
device_id (str, Optional): A mobile device ID.
Default: Use ``device_id`` of the :class:`MobileClient` instance.
quality (str, Optional): Stream quality is one of ``'hi'`` (320Kbps), ``'med'`` (160Kbps), or ``'low'`` (128Kbps).
Default: ``'hi'``.
session_token (str): Session token from a station dict required for unsubscribed users to stream a station song.
station['sessionToken'] as returned by :meth:`station` only exists for free accounts.
Returns:
bytes: An MP3 file.
"""
if device_id is None:
device_id = self.device_id
stream_url = self.stream_url(
item,
device_id=device_id,
quality=quality,
session_token=session_token
)
response = self.session.get(stream_url)
audio = response.content
return audio | python | def stream(self, item, *, device_id=None, quality='hi', session_token=None):
"""Get MP3 stream of a podcast episode, library song, station_song, or store song.
Note:
Streaming requires a ``device_id`` from a valid, linked mobile device.
Parameters:
item (dict): A podcast episode, library song, station_song, or store song dict.
A Google Music subscription is required to stream store songs.
device_id (str, Optional): A mobile device ID.
Default: Use ``device_id`` of the :class:`MobileClient` instance.
quality (str, Optional): Stream quality is one of ``'hi'`` (320Kbps), ``'med'`` (160Kbps), or ``'low'`` (128Kbps).
Default: ``'hi'``.
session_token (str): Session token from a station dict required for unsubscribed users to stream a station song.
station['sessionToken'] as returned by :meth:`station` only exists for free accounts.
Returns:
bytes: An MP3 file.
"""
if device_id is None:
device_id = self.device_id
stream_url = self.stream_url(
item,
device_id=device_id,
quality=quality,
session_token=session_token
)
response = self.session.get(stream_url)
audio = response.content
return audio | Get MP3 stream of a podcast episode, library song, station_song, or store song.
Note:
Streaming requires a ``device_id`` from a valid, linked mobile device.
Parameters:
item (dict): A podcast episode, library song, station_song, or store song dict.
A Google Music subscription is required to stream store songs.
device_id (str, Optional): A mobile device ID.
Default: Use ``device_id`` of the :class:`MobileClient` instance.
quality (str, Optional): Stream quality is one of ``'hi'`` (320Kbps), ``'med'`` (160Kbps), or ``'low'`` (128Kbps).
Default: ``'hi'``.
session_token (str): Session token from a station dict required for unsubscribed users to stream a station song.
station['sessionToken'] as returned by :meth:`station` only exists for free accounts.
Returns:
bytes: An MP3 file. | https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L1866-L1898 |
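A sketch of saving a stream to disk; both the username and device ID are placeholders, and a linked mobile device is required as the note above says:

```python
import google_music

mc = google_music.mobileclient('username', device_id='aabbccddeeff')  # placeholders

song = mc.songs()[0]
audio = mc.stream(song, quality='med')  # raw MP3 bytes
with open('song.mp3', 'wb') as fp:
    fp.write(audio)
```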
thebigmunch/google-music | src/google_music/clients/mobileclient.py | MobileClient.stream_url | def stream_url(self, item, *, device_id=None, quality='hi', session_token=None):
"""Get a URL to stream a podcast episode, library song, station_song, or store song.
Note:
Streaming requires a ``device_id`` from a valid, linked mobile device.
Parameters:
item (dict): A podcast episode, library song, station_song, or store song dict.
A Google Music subscription is required to stream store songs.
device_id (str, Optional): A mobile device ID.
Default: Use ``device_id`` of the :class:`MobileClient` instance.
quality (str, Optional): Stream quality is one of ``'hi'`` (320Kbps), ``'med'`` (160Kbps), or ``'low'`` (128Kbps).
Default: ``'hi'``.
session_token (str): Session token from a station dict required for unsubscribed users to stream a station song.
station['sessionToken'] as returned by :meth:`station` only exists for free accounts.
Returns:
str: A URL to an MP3 file.
"""
if device_id is None:
device_id = self.device_id
if 'episodeId' in item: # Podcast episode.
response = self._call(
mc_calls.PodcastEpisodeStreamURL,
item['episodeId'],
quality=quality,
device_id=device_id
)
elif 'wentryid' in item: # Free account station song.
response = self._call(
mc_calls.RadioStationTrackStreamURL,
item['storeId'],
item['wentryid'],
session_token,
quality=quality,
device_id=device_id
)
elif 'trackId' in item: # Playlist song.
response = self._call(
mc_calls.TrackStreamURL,
item['trackId'],
quality=quality,
device_id=device_id
)
elif 'storeId' in item and self.is_subscribed: # Store song.
response = self._call(
mc_calls.TrackStreamURL,
item['storeId'],
quality=quality,
device_id=device_id
)
elif 'id' in item: # Library song.
response = self._call(
mc_calls.TrackStreamURL,
item['id'],
quality=quality,
device_id=device_id
)
else:
# TODO: Create an exception for not being subscribed or use a better builtin exception for this case.
if 'storeId' in item and not self.is_subscribed:
msg = "Can't stream a store song without a subscription."
else:
msg = "Item does not contain an ID field."
raise ValueError(msg)
try:
stream_url = response.headers['Location']
except KeyError:
stream_url = response.body['url']
return stream_url | python | def stream_url(self, item, *, device_id=None, quality='hi', session_token=None):
"""Get a URL to stream a podcast episode, library song, station_song, or store song.
Note:
Streaming requires a ``device_id`` from a valid, linked mobile device.
Parameters:
item (dict): A podcast episode, library song, station_song, or store song dict.
A Google Music subscription is required to stream store songs.
device_id (str, Optional): A mobile device ID.
Default: Use ``device_id`` of the :class:`MobileClient` instance.
quality (str, Optional): Stream quality is one of ``'hi'`` (320Kbps), ``'med'`` (160Kbps), or ``'low'`` (128Kbps).
Default: ``'hi'``.
session_token (str): Session token from a station dict required for unsubscribed users to stream a station song.
station['sessionToken'] as returned by :meth:`station` only exists for free accounts.
Returns:
str: A URL to an MP3 file.
"""
if device_id is None:
device_id = self.device_id
if 'episodeId' in item: # Podcast episode.
response = self._call(
mc_calls.PodcastEpisodeStreamURL,
item['episodeId'],
quality=quality,
device_id=device_id
)
elif 'wentryid' in item: # Free account station song.
response = self._call(
mc_calls.RadioStationTrackStreamURL,
item['storeId'],
item['wentryid'],
session_token,
quality=quality,
device_id=device_id
)
elif 'trackId' in item: # Playlist song.
response = self._call(
mc_calls.TrackStreamURL,
item['trackId'],
quality=quality,
device_id=device_id
)
elif 'storeId' in item and self.is_subscribed: # Store song.
response = self._call(
mc_calls.TrackStreamURL,
item['storeId'],
quality=quality,
device_id=device_id
)
elif 'id' in item: # Library song.
response = self._call(
mc_calls.TrackStreamURL,
item['id'],
quality=quality,
device_id=device_id
)
else:
# TODO: Create an exception for not being subscribed or use a better builtin exception for this case.
if 'storeId' in item and not self.is_subscribed:
msg = "Can't stream a store song without a subscription."
else:
msg = "Item does not contain an ID field."
raise ValueError(msg)
try:
stream_url = response.headers['Location']
except KeyError:
stream_url = response.body['url']
return stream_url | Get a URL to stream a podcast episode, library song, station_song, or store song.
Note:
Streaming requires a ``device_id`` from a valid, linked mobile device.
Parameters:
item (dict): A podcast episode, library song, station_song, or store song dict.
A Google Music subscription is required to stream store songs.
device_id (str, Optional): A mobile device ID.
Default: Use ``device_id`` of the :class:`MobileClient` instance.
quality (str, Optional): Stream quality is one of ``'hi'`` (320Kbps), ``'med'`` (160Kbps), or ``'low'`` (128Kbps).
Default: ``'hi'``.
session_token (str): Session token from a station dict required for unsubscribed users to stream a station song.
station['sessionToken'] as returend by :meth:`station` only exists for free accounts.
Returns:
str: A URL to an MP3 file. | https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L1900-L1974 |
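The branching above dispatches on whichever ID key the item dict carries; a sketch that only resolves the URL (placeholders as before):

```python
import google_music

mc = google_music.mobileclient('username', device_id='aabbccddeeff')  # placeholders

# Key -> branch: 'episodeId' podcast, 'wentryid' free-account station song,
# 'trackId' playlist song, 'storeId' store song, 'id' library song.
url = mc.stream_url(mc.songs()[0], quality='low')
print(url)
```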
thebigmunch/google-music | src/google_music/clients/mobileclient.py | MobileClient.thumbs_up_songs | def thumbs_up_songs(self, *, library=True, store=True):
"""Get a listing of 'Thumbs Up' store songs.
Parameters:
library (bool, Optional): Include 'Thumbs Up' songs from library.
Default: True
store (bool, Optional): Include 'Thumbs Up' songs from store.
Default: True
Returns:
list: Dicts of 'Thumbs Up' songs.
"""
thumbs_up_songs = []
if library is True:
thumbs_up_songs.extend(
song
for song in self.songs()
if song.get('rating', '0') == '5'
)
if store is True:
response = self._call(mc_calls.EphemeralTop)
thumbs_up_songs.extend(response.body.get('data', {}).get('items', []))
return thumbs_up_songs | python | def thumbs_up_songs(self, *, library=True, store=True):
"""Get a listing of 'Thumbs Up' store songs.
Parameters:
library (bool, Optional): Include 'Thumbs Up' songs from library.
Default: True
store (bool, Optional): Include 'Thumbs Up' songs from store.
Default: True
Returns:
list: Dicts of 'Thumbs Up' songs.
"""
thumbs_up_songs = []
if library is True:
thumbs_up_songs.extend(
song
for song in self.songs()
if song.get('rating', '0') == '5'
)
if store is True:
response = self._call(mc_calls.EphemeralTop)
thumbs_up_songs.extend(response.body.get('data', {}).get('items', []))
return thumbs_up_songs | Get a listing of 'Thumbs Up' library and store songs.
Parameters:
library (bool, Optional): Include 'Thumbs Up' songs from library.
Default: True
store (bool, Optional): Include 'Thumbs Up' songs from store.
Default: True
Returns:
list: Dicts of 'Thumbs Up' songs. | https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L1976-L2002 |
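A short sketch (placeholder username); note the library half filters on `rating == '5'` as shown above:

```python
import google_music

mc = google_music.mobileclient('username')  # placeholder

liked = mc.thumbs_up_songs(store=False)  # library ratings only
print(len(liked), "library songs rated 'thumbs up'")
```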
thebigmunch/google-music | src/google_music/clients/mobileclient.py | MobileClient.top_charts | def top_charts(self):
"""Get a listing of the default top charts."""
response = self._call(mc_calls.BrowseTopChart)
top_charts = response.body
return top_charts | python | def top_charts(self):
"""Get a listing of the default top charts."""
response = self._call(mc_calls.BrowseTopChart)
top_charts = response.body
return top_charts | Get a listing of the default top charts. | https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L2004-L2010 |
thebigmunch/google-music | src/google_music/clients/mobileclient.py | MobileClient.top_charts_for_genre | def top_charts_for_genre(self, genre_id):
"""Get a listing of top charts for a top chart genre.
Parameters:
genre_id (str): A top chart genre ID as found with :meth:`top_charts_genres`.
"""
response = self._call(mc_calls.BrowseTopChartForGenre, genre_id)
top_chart_for_genre = response.body
return top_chart_for_genre | python | def top_charts_for_genre(self, genre_id):
"""Get a listing of top charts for a top chart genre.
Parameters:
genre_id (str): A top chart genre ID as found with :meth:`top_charts_genres`.
"""
response = self._call(mc_calls.BrowseTopChartForGenre, genre_id)
top_chart_for_genre = response.body
return top_chart_for_genre | Get a listing of top charts for a top chart genre.
Parameters:
genre_id (str): A top chart genre ID as found with :meth:`top_charts_genres`. | https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L2012-L2022 |
thebigmunch/google-music | src/google_music/clients/mobileclient.py | MobileClient.top_charts_genres | def top_charts_genres(self):
"""Get a listing of genres from the browse top charts tab."""
response = self._call(mc_calls.BrowseTopChartGenres)
top_chart_genres = response.body.get('genres', [])
return top_chart_genres | python | def top_charts_genres(self):
"""Get a listing of genres from the browse top charts tab."""
response = self._call(mc_calls.BrowseTopChartGenres)
top_chart_genres = response.body.get('genres', [])
return top_chart_genres | Get a listing of genres from the browse top charts tab. | https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L2024-L2030 |
anrosent/LT-code | lt/decode/__main__.py | run | def run(stream=sys.stdin.buffer):
"""Reads from stream, applying the LT decoding algorithm
to incoming encoded blocks until sufficiently many blocks
have been received to reconstruct the entire file.
"""
payload = decode.decode(stream)
sys.stdout.write(payload.decode('utf8')) | python | def run(stream=sys.stdin.buffer):
"""Reads from stream, applying the LT decoding algorithm
to incoming encoded blocks until sufficiently many blocks
have been received to reconstruct the entire file.
"""
payload = decode.decode(stream)
sys.stdout.write(payload.decode('utf8')) | Reads from stream, applying the LT decoding algorithm
to incoming encoded blocks until sufficiently many blocks
have been received to reconstruct the entire file. | https://github.com/anrosent/LT-code/blob/e13a4c927effc90f9d41ab3884f9fcbd95b9450d/lt/decode/__main__.py#L14-L20 |
anrosent/LT-code | lt/encode/__init__.py | _split_file | def _split_file(f, blocksize):
"""Block file byte contents into blocksize chunks, padding last one if necessary
"""
f_bytes = f.read()
blocks = [int.from_bytes(f_bytes[i:i+blocksize].ljust(blocksize, b'0'), sys.byteorder)
for i in range(0, len(f_bytes), blocksize)]
return len(f_bytes), blocks | python | def _split_file(f, blocksize):
"""Block file byte contents into blocksize chunks, padding last one if necessary
"""
f_bytes = f.read()
blocks = [int.from_bytes(f_bytes[i:i+blocksize].ljust(blocksize, b'0'), sys.byteorder)
for i in range(0, len(f_bytes), blocksize)]
return len(f_bytes), blocks | Block file byte contents into blocksize chunks, padding last one if necessary | https://github.com/anrosent/LT-code/blob/e13a4c927effc90f9d41ab3884f9fcbd95b9450d/lt/encode/__init__.py#L7-L14 |
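A standalone restatement of the chunking logic above, runnable without the repo, to make the padding behaviour concrete (note the pad byte is the ASCII digit b'0', not NUL):

```python
import io
import sys

def split(f, blocksize):
    # Same logic as _split_file: pad the last chunk to blocksize with b'0'
    # and store every chunk as an integer.
    f_bytes = f.read()
    blocks = [
        int.from_bytes(f_bytes[i:i + blocksize].ljust(blocksize, b'0'), sys.byteorder)
        for i in range(0, len(f_bytes), blocksize)
    ]
    return len(f_bytes), blocks

size, blocks = split(io.BytesIO(b'hello world'), 8)
print(size, len(blocks))  # 11 2 -> the second chunk was padded to 8 bytes
```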
anrosent/LT-code | lt/encode/__init__.py | encoder | def encoder(f, blocksize, seed=None, c=sampler.DEFAULT_C, delta=sampler.DEFAULT_DELTA):
"""Generates an infinite sequence of blocks to transmit
to the receiver
"""
# Generate seed if not provided
if seed is None:
seed = randint(0, (1 << 31) - 1)  # parentheses needed: '1 << 31 - 1' is '1 << 30' by precedence
# get file blocks
filesize, blocks = _split_file(f, blocksize)
# init stream vars
K = len(blocks)
prng = sampler.PRNG(params=(K, delta, c))
prng.set_seed(seed)
# block generation loop
while True:
blockseed, d, ix_samples = prng.get_src_blocks()
block_data = 0
for ix in ix_samples:
block_data ^= blocks[ix]
# Generate blocks of XORed data in network byte order
block = (filesize, blocksize, blockseed, int.to_bytes(block_data, blocksize, sys.byteorder))
yield pack('!III%ss'%blocksize, *block) | python | def encoder(f, blocksize, seed=None, c=sampler.DEFAULT_C, delta=sampler.DEFAULT_DELTA):
"""Generates an infinite sequence of blocks to transmit
to the receiver
"""
# Generate seed if not provided
if seed is None:
seed = randint(0, (1 << 31) - 1)  # parentheses needed: '1 << 31 - 1' is '1 << 30' by precedence
# get file blocks
filesize, blocks = _split_file(f, blocksize)
# init stream vars
K = len(blocks)
prng = sampler.PRNG(params=(K, delta, c))
prng.set_seed(seed)
# block generation loop
while True:
blockseed, d, ix_samples = prng.get_src_blocks()
block_data = 0
for ix in ix_samples:
block_data ^= blocks[ix]
# Generate blocks of XORed data in network byte order
block = (filesize, blocksize, blockseed, int.to_bytes(block_data, blocksize, sys.byteorder))
yield pack('!III%ss'%blocksize, *block) | Generates an infinite sequence of blocks to transmit
to the receiver | https://github.com/anrosent/LT-code/blob/e13a4c927effc90f9d41ab3884f9fcbd95b9450d/lt/encode/__init__.py#L17-L43 |
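A sketch of driving the generator, following the import path implied by lt/encode/__init__.py; the filenames and block count are arbitrary:

```python
from itertools import islice

from lt.encode import encoder

with open('input.bin', 'rb') as f, open('blocks.lt', 'wb') as out:
    # Each yielded packet is a network-byte-order struct:
    # filesize, blocksize, blockseed, then the XORed payload.
    for packet in islice(encoder(f, blocksize=512, seed=42), 64):
        out.write(packet)
```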
anrosent/LT-code | lt/decode/__init__.py | _read_block | def _read_block(blocksize, stream):
"""Read block data from network into integer type
"""
blockdata = stream.read(blocksize)
return int.from_bytes(blockdata, 'big') | python | def _read_block(blocksize, stream):
"""Read block data from network into integer type
"""
blockdata = stream.read(blocksize)
return int.from_bytes(blockdata, 'big') | Read block data from network into integer type | https://github.com/anrosent/LT-code/blob/e13a4c927effc90f9d41ab3884f9fcbd95b9450d/lt/decode/__init__.py#L145-L149 |
anrosent/LT-code | lt/decode/__init__.py | read_blocks | def read_blocks(stream):
"""Generate parsed blocks from input stream
"""
while True:
header = _read_header(stream)
block = _read_block(header[1], stream)
yield (header, block) | python | def read_blocks(stream):
"""Generate parsed blocks from input stream
"""
while True:
header = _read_header(stream)
block = _read_block(header[1], stream)
yield (header, block) | Generate parsed blocks from input stream | https://github.com/anrosent/LT-code/blob/e13a4c927effc90f9d41ab3884f9fcbd95b9450d/lt/decode/__init__.py#L151-L157 |
anrosent/LT-code | lt/decode/__init__.py | BlockGraph.add_block | def add_block(self, nodes, data):
"""Adds a new check node and edges between that node and all
source nodes it connects, resolving all message passes that
become possible as a result.
"""
# We can eliminate this source node
if len(nodes) == 1:
to_eliminate = list(self.eliminate(next(iter(nodes)), data))
# Recursively eliminate all nodes that can now be resolved
while len(to_eliminate):
other, check = to_eliminate.pop()
to_eliminate.extend(self.eliminate(other, check))
else:
# Pass messages from already-resolved source nodes
for node in list(nodes):
if node in self.eliminated:
nodes.remove(node)
data ^= self.eliminated[node]
# Resolve if we are left with a single non-resolved source node
if len(nodes) == 1:
return self.add_block(nodes, data)
else:
# Add edges for all remaining nodes to this check
check = CheckNode(nodes, data)
for node in nodes:
self.checks[node].append(check)
# Are we done yet?
return len(self.eliminated) >= self.num_blocks | python | def add_block(self, nodes, data):
"""Adds a new check node and edges between that node and all
source nodes it connects, resolving all message passes that
become possible as a result.
"""
# We can eliminate this source node
if len(nodes) == 1:
to_eliminate = list(self.eliminate(next(iter(nodes)), data))
# Recursively eliminate all nodes that can now be resolved
while len(to_eliminate):
other, check = to_eliminate.pop()
to_eliminate.extend(self.eliminate(other, check))
else:
# Pass messages from already-resolved source nodes
for node in list(nodes):
if node in self.eliminated:
nodes.remove(node)
data ^= self.eliminated[node]
# Resolve if we are left with a single non-resolved source node
if len(nodes) == 1:
return self.add_block(nodes, data)
else:
# Add edges for all remaining nodes to this check
check = CheckNode(nodes, data)
for node in nodes:
self.checks[node].append(check)
# Are we done yet?
return len(self.eliminated) >= self.num_blocks | Adds a new check node and edges between that node and all
source nodes it connects, resolving all message passes that
become possible as a result. | https://github.com/anrosent/LT-code/blob/e13a4c927effc90f9d41ab3884f9fcbd95b9450d/lt/decode/__init__.py#L29-L62 |
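The elimination cascade in add_block is easiest to see on a toy instance. Below is a self-contained sketch of the same XOR peeling idea, not the repo's classes:

```python
# Toy XOR peeling in the spirit of add_block/eliminate above:
# each check node is (set of source indices, XOR of their values).
checks = [({0}, 5), ({0, 1}, 12), ({1, 2}, 3)]
solved = {}

progress = True
while progress:
    progress = False
    for nodes, value in checks:
        unknown = [n for n in nodes if n not in solved]
        if len(unknown) == 1:  # degree-one check: its last node is determined
            acc = value
            for n in nodes:
                if n in solved:
                    acc ^= solved[n]  # pass resolved messages into the check
            solved[unknown[0]] = acc
            progress = True

print(solved)  # {0: 5, 1: 9, 2: 10}
```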
anrosent/LT-code | lt/decode/__init__.py | BlockGraph.eliminate | def eliminate(self, node, data):
"""Resolves a source node, passing the message to all associated checks
"""
# Cache resolved value
self.eliminated[node] = data
others = self.checks[node]
del self.checks[node]
# Pass messages to all associated checks
for check in others:
check.check ^= data
check.src_nodes.remove(node)
# Yield all nodes that can now be resolved
if len(check.src_nodes) == 1:
yield (next(iter(check.src_nodes)), check.check) | python | def eliminate(self, node, data):
"""Resolves a source node, passing the message to all associated checks
"""
# Cache resolved value
self.eliminated[node] = data
others = self.checks[node]
del self.checks[node]
# Pass messages to all associated checks
for check in others:
check.check ^= data
check.src_nodes.remove(node)
# Yield all nodes that can now be resolved
if len(check.src_nodes) == 1:
yield (next(iter(check.src_nodes)), check.check) | Resolves a source node, passing the message to all associated checks | https://github.com/anrosent/LT-code/blob/e13a4c927effc90f9d41ab3884f9fcbd95b9450d/lt/decode/__init__.py#L64-L80 |
thebigmunch/google-music | src/google_music/api.py | mobileclient | def mobileclient(username=None, device_id=None, *, token=None, locale='en_US'):
"""Create and authenticate a Google Music mobile client.
>>> import google_music
>>> mc = google_music.mobileclient('username')
Parameters:
username (str, Optional): Your Google Music username.
This is used to store OAuth credentials for different accounts separately.
device_id (str, Optional): A mobile device ID. Default: MAC address is used.
token (dict, Optional): An OAuth token compatible with ``requests-oauthlib``.
locale (str, Optional): `ICU <http://www.localeplanet.com/icu/>`__ locale used to localize some
responses. This must be a locale supported by Android. Default: ``'en_US'``.
Returns:
MobileClient: An authenticated :class:`~google_music.MobileClient` instance.
"""
return MobileClient(
username,
device_id,
token=token,
locale=locale
) | python | def mobileclient(username=None, device_id=None, *, token=None, locale='en_US'):
"""Create and authenticate a Google Music mobile client.
>>> import google_music
>>> mc = google_music.mobileclient('username')
Parameters:
username (str, Optional): Your Google Music username.
This is used to store OAuth credentials for different accounts separately.
device_id (str, Optional): A mobile device ID. Default: MAC address is used.
token (dict, Optional): An OAuth token compatible with ``requests-oauthlib``.
locale (str, Optional): `ICU <http://www.localeplanet.com/icu/>`__ locale used to localize some
responses. This must be a locale supported by Android. Default: ``'en_US'``.
Returns:
MobileClient: An authenticated :class:`~google_music.MobileClient` instance.
"""
return MobileClient(
username,
device_id,
token=token,
locale=locale
) | Create and authenticate a Google Music mobile client.
>>> import google_music
>>> mc = google_music.mobileclient('username')
Parameters:
username (str, Optional): Your Google Music username.
This is used to store OAuth credentials for different accounts separately.
device_id (str, Optional): A mobile device ID. Default: MAC address is used.
token (dict, Optional): An OAuth token compatible with ``requests-oauthlib``.
locale (str, Optional): `ICU <http://www.localeplanet.com/icu/>`__ locale used to localize some
responses. This must be a locale supported by Android. Default: ``'en_US'``.
Returns:
MobileClient: An authenticated :class:`~google_music.MobileClient` instance. | https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/api.py#L6-L29 |
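A sketch of the login flow; both arguments are placeholders, and the OAuth token is cached per username as the docstring above notes:

```python
import google_music

mc = google_music.mobileclient('username', device_id='aabbccddeeff')  # placeholders
print(mc.is_subscribed)  # attribute used by stream_url in the rows above
```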
Zsailer/phylopandas | phylopandas/treeio/read.py | _dendropy_to_dataframe | def _dendropy_to_dataframe(
tree,
add_node_labels=True,
use_uids=True):
"""Convert Dendropy tree to Pandas dataframe."""
# Maximum distance from root.
tree.max_distance_from_root()
# Initialize the data object.
idx = []
data = {
'type': [],
'id': [],
'parent': [],
'length': [],
'label': [],
'distance': []}
if use_uids:
data['uid'] = []
# Add labels to internal nodes if set to true.
if add_node_labels:
for i, node in enumerate(tree.internal_nodes()):
node.label = str(i)
for node in tree.nodes():
# Get node type
if node.is_leaf():
type_ = 'leaf'
label = str(node.taxon.label).replace(' ', '_')
elif node.is_internal():
type_ = 'node'
label = str(node.label)
# Set node label and parent.
id_ = label
parent_node = node.parent_node
length = node.edge_length
distance = node.distance_from_root()
# Is this node a root?
if parent_node is None and length is None:
parent_label = None
parent_node = None
length = 0
distance = 0
type_ = 'root'
# Set parent node label
elif parent_node.is_internal():
parent_label = str(parent_node.label)
else:
raise Exception("Subtree is not attached to tree?")
# Add this node to the data.
data['type'].append(type_)
data['id'].append(id_)
data['parent'].append(parent_label)
data['length'].append(length)
data['label'].append(label)
data['distance'].append(distance)
if use_uids:
data['uid'].append(get_random_id(10))
# Construct dataframe.
df = pandas.DataFrame(data)
return df | python | def _dendropy_to_dataframe(
tree,
add_node_labels=True,
use_uids=True):
"""Convert Dendropy tree to Pandas dataframe."""
# Maximum distance from root.
tree.max_distance_from_root()
# Initialize the data object.
idx = []
data = {
'type': [],
'id': [],
'parent': [],
'length': [],
'label': [],
'distance': []}
if use_uids:
data['uid'] = []
# Add labels to internal nodes if set to true.
if add_node_labels:
for i, node in enumerate(tree.internal_nodes()):
node.label = str(i)
for node in tree.nodes():
# Get node type
if node.is_leaf():
type_ = 'leaf'
label = str(node.taxon.label).replace(' ', '_')
elif node.is_internal():
type_ = 'node'
label = str(node.label)
# Set node label and parent.
id_ = label
parent_node = node.parent_node
length = node.edge_length
distance = node.distance_from_root()
# Is this node a root?
if parent_node is None and length is None:
parent_label = None
parent_node = None
length = 0
distance = 0
type_ = 'root'
# Set parent node label
elif parent_node.is_internal():
parent_label = str(parent_node.label)
else:
raise Exception("Subtree is not attached to tree?")
# Add this node to the data.
data['type'].append(type_)
data['id'].append(id_)
data['parent'].append(parent_label)
data['length'].append(length)
data['label'].append(label)
data['distance'].append(distance)
if use_uids:
data['uid'].append(get_random_id(10))
# Construct dataframe.
df = pandas.DataFrame(data)
return df | Convert Dendropy tree to Pandas dataframe. | https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/treeio/read.py#L35-L104 |
Zsailer/phylopandas | phylopandas/treeio/read.py | _read | def _read(
filename=None,
data=None,
schema=None,
add_node_labels=True,
use_uids=True
):
"""Read a phylogenetic tree into a phylopandas.DataFrame.
The resulting DataFrame has the following columns:
- name: label for each taxa or node.
- id: unique id (created by phylopandas) given to each node.
- type: type of node (leaf, internal, or root).
- parent: parent id. necessary for constructing trees.
- length: length of branch from parent to node.
- distance: distance from root.
Parameters
----------
filename: str (default is None)
newick file to read into DataFrame.
data: str (default is None)
newick string to parse and read into DataFrame.
add_node_labels: bool
If true, labels the internal nodes with numbers.
Returns
-------
df: phylopandas.DataFrame.
"""
if filename is not None:
# Use Dendropy to parse tree.
tree = dendropy.Tree.get(
path=filename,
schema=schema,
preserve_underscores=True)
elif data is not None:
tree = dendropy.Tree.get(
data=data,
schema=schema,
preserve_underscores=True)
else:
raise Exception('No tree given?')
df = _dendropy_to_dataframe(
tree,
add_node_labels=add_node_labels,
use_uids=use_uids
)
return df | python | def _read(
filename=None,
data=None,
schema=None,
add_node_labels=True,
use_uids=True
):
"""Read a phylogenetic tree into a phylopandas.DataFrame.
The resulting DataFrame has the following columns:
- name: label for each taxa or node.
- id: unique id (created by phylopandas) given to each node.
- type: type of node (leaf, internal, or root).
- parent: parent id. necessary for constructing trees.
- length: length of branch from parent to node.
- distance: distance from root.
Parameters
----------
filename: str (default is None)
newick file to read into DataFrame.
data: str (default is None)
newick string to parse and read into DataFrame.
add_node_labels: bool
If true, labels the internal nodes with numbers.
Returns
-------
df: phylopandas.DataFrame.
"""
if filename is not None:
# Use Dendropy to parse tree.
tree = dendropy.Tree.get(
path=filename,
schema=schema,
preserve_underscores=True)
elif data is not None:
tree = dendropy.Tree.get(
data=data,
schema=schema,
preserve_underscores=True)
else:
raise Exception('No tree given?')
df = _dendropy_to_dataframe(
tree,
add_node_labels=add_node_labels,
use_uids=use_uids
)
return df | Read a phylogenetic tree into a phylopandas.DataFrame.
The resulting DataFrame has the following columns:
- name: label for each taxa or node.
- id: unique id (created by phylopandas) given to each node.
- type: type of node (leaf, internal, or root).
- parent: parent id. necessary for constructing trees.
- length: length of branch from parent to node.
- distance: distance from root.
Parameters
----------
filename: str (default is None)
newick file to read into DataFrame.
data: str (default is None)
newick string to parse and read into DataFrame.
add_node_labels: bool
If true, labels the internal nodes with numbers.
Returns
-------
df: phylopandas.DataFrame. | https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/treeio/read.py#L107-L158 |
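A sketch parsing an inline newick string through the (private) helper above; the tree literal is arbitrary:

```python
from phylopandas.treeio.read import _read

df = _read(data='((A:1.0,B:2.0):0.5,C:3.0);', schema='newick')
print(df[['type', 'id', 'parent', 'length', 'distance']])
```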
Zsailer/phylopandas | phylopandas/treeio/read.py | _read_function | def _read_function(schema):
"""Add a write method for named schema to a class.
"""
def func(
filename=None,
data=None,
add_node_labels=True,
use_uids=True,
**kwargs):
# Use the generic read function to read the data.
return _read(
filename=filename,
data=data,
schema=schema,
add_node_labels=add_node_labels,
use_uids=use_uids,
**kwargs
)
# Update docs
func.__doc__ = _read_doc_template(schema)
return func | python | def _read_function(schema):
"""Add a write method for named schema to a class.
"""
def func(
filename=None,
data=None,
add_node_labels=True,
use_uids=True,
**kwargs):
# Use the generic read function to read the data.
return _read(
filename=filename,
data=data,
schema=schema,
add_node_labels=add_node_labels,
use_uids=use_uids,
**kwargs
)
# Update docs
func.__doc__ = _read_doc_template(schema)
return func | Add a read method for named schema to a class. | https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/treeio/read.py#L189-L209 |
Zsailer/phylopandas | phylopandas/seqio/write.py | pandas_df_to_biopython_seqrecord | def pandas_df_to_biopython_seqrecord(
df,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
):
"""Convert pandas dataframe to biopython seqrecord for easy writing.
Parameters
----------
df : Dataframe
Pandas dataframe to convert
id_col : str
column in dataframe to use as sequence label
sequence_col : str
column in dataframe to use as sequence data
extra_data : list
extra columns to use in sequence description line
alphabet :
biopython Alphabet object
Returns
-------
seq_records :
List of biopython seqrecords.
"""
seq_records = []
for i, row in df.iterrows():
# Tries getting sequence data. If a TypeError at the seqrecord
# creation is thrown, it is assumed that this row does not contain
# sequence data and therefore the row is ignored.
try:
# Get sequence
seq = Seq(row[sequence_col], alphabet=alphabet)
# Get id
id = row[id_col]
# Build a description
description = ""
if extra_data is not None:
description = " ".join([row[key] for key in extra_data])
# Build a record
record = SeqRecord(
seq=seq,
id=id,
description=description,
)
seq_records.append(record)
except TypeError:
pass
return seq_records | python | def pandas_df_to_biopython_seqrecord(
df,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
):
"""Convert pandas dataframe to biopython seqrecord for easy writing.
Parameters
----------
df : Dataframe
Pandas dataframe to convert
id_col : str
column in dataframe to use as sequence label
sequence_col : str
column in dataframe to use as sequence data
extra_data : list
extra columns to use in sequence description line
alphabet :
biopython Alphabet object
Returns
-------
seq_records :
List of biopython seqrecords.
"""
seq_records = []
for i, row in df.iterrows():
# Tries getting sequence data. If a TypeError at the seqrecord
# creation is thrown, it is assumed that this row does not contain
# sequence data and therefore the row is ignored.
try:
# Get sequence
seq = Seq(row[sequence_col], alphabet=alphabet)
# Get id
id = row[id_col]
# Build a description
description = ""
if extra_data is not None:
description = " ".join([row[key] for key in extra_data])
# Build a record
record = SeqRecord(
seq=seq,
id=id,
description=description,
)
seq_records.append(record)
except TypeError:
pass
return seq_records | Convert pandas dataframe to biopython seqrecord for easy writing.
Parameters
----------
df : Dataframe
Pandas dataframe to convert
id_col : str
column in dataframe to use as sequence label
sequence_col : str
column in dataframe to use as sequence data
extra_data : list
extra columns to use in sequence description line
alphabet :
biopython Alphabet object
Returns
-------
seq_records :
List of biopython seqrecords. | https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/seqio/write.py#L34-L93 |
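A sketch of converting a toy DataFrame; this assumes a Biopython version that still ships Bio.Alphabet (removed in 1.78), since the converter passes an alphabet into Seq:

```python
import pandas as pd
from Bio.Alphabet import generic_protein
from phylopandas.seqio.write import pandas_df_to_biopython_seqrecord

df = pd.DataFrame({'uid': ['s1', 's2'], 'sequence': ['MKV', 'MLA']})
records = pandas_df_to_biopython_seqrecord(df, alphabet=generic_protein)
print(records[0].format('fasta'))
```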
Zsailer/phylopandas | phylopandas/seqio/write.py | pandas_series_to_biopython_seqrecord | def pandas_series_to_biopython_seqrecord(
series,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None
):
"""Convert pandas series to biopython seqrecord for easy writing.
Parameters
----------
series : Series
Pandas series to convert
id_col : str
column in dataframe to use as sequence label
sequence_col : str
column in dataframe to use as sequence data
extra_data : list
extra columns to use in sequence description line
Returns
-------
seq_records :
List of biopython seqrecords.
"""
# Get sequence
seq = Seq(series[sequence_col], alphabet=alphabet)
# Get id
id = series[id_col]
# Build a description
description = ""
if extra_data is not None:
description = " ".join([series[key] for key in extra_data])
# Build a record
record = SeqRecord(
seq=seq,
id=id,
description=description,
)
seq_records = [record]
return seq_records | python | def pandas_series_to_biopython_seqrecord(
series,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None
):
"""Convert pandas series to biopython seqrecord for easy writing.
Parameters
----------
series : Series
Pandas series to convert
id_col : str
column in dataframe to use as sequence label
sequence_col : str
column in dataframe to use as sequence data
extra_data : list
extra columns to use in sequence description line
Returns
-------
seq_records :
List of biopython seqrecords.
"""
# Get sequence
seq = Seq(series[sequence_col], alphabet=alphabet)
# Get id
id = series[id_col]
# Build a description
description = ""
if extra_data is not None:
description = " ".join([series[key] for key in extra_data])
# Build a record
record = SeqRecord(
seq=seq,
id=id,
description=description,
)
seq_records = [record]
return seq_records | Convert pandas series to biopython seqrecord for easy writing.
Parameters
----------
series : Series
Pandas series to convert
id_col : str
column in dataframe to use as sequence label
sequence_col : str
column in dataframe to use as sequence data
extra_data : list
extra columns to use in sequence description line
Returns
-------
seq_records :
List of biopython seqrecords. | https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/seqio/write.py#L95-L142 |
Zsailer/phylopandas | phylopandas/seqio/write.py | _write | def _write(
data,
filename=None,
schema='fasta',
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
**kwargs):
"""General write function. Write phylopanda data to biopython format.
Parameters
----------
filename : str
File to write string to. If no filename is given, a string
will be returned.
sequence_col : str (default='sequence')
Sequence column name in DataFrame.
id_col : str (default='uid')
ID column name in DataFrame.
extra_data : list (optional)
Extra columns to include in the sequence description line.
"""
# Check Alphabet if given
if alphabet is None:
alphabet = Bio.Alphabet.Alphabet()
elif alphabet in ['dna', 'rna', 'protein', 'nucleotide']:
alphabet = getattr(Bio.Alphabet, 'generic_{}'.format(alphabet))
else:
raise Exception(
"The alphabet is not recognized. Must be 'dna', 'rna', "
"'nucleotide', or 'protein'.")
# Build a list of records from a pandas DataFrame
if type(data) is pd.DataFrame:
seq_records = pandas_df_to_biopython_seqrecord(
data,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
)
# Build a record from a pandas Series
elif type(data) is pd.Series:
seq_records = pandas_series_to_biopython_seqrecord(
data,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
)
# Write to disk or return string
if filename is not None:
SeqIO.write(seq_records, filename, format=schema, **kwargs)
else:
return "".join([s.format(schema) for s in seq_records]) | python | def _write(
data,
filename=None,
schema='fasta',
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
**kwargs):
"""General write function. Write phylopanda data to biopython format.
Parameters
----------
filename : str
File to write string to. If no filename is given, a string
will be returned.
sequence_col : str (default='sequence')
Sequence column name in DataFrame.
id_col : str (default='uid')
ID column name in DataFrame.
extra_data : list (optional)
Extra columns to include in the sequence description line.
"""
# Check Alphabet if given
if alphabet is None:
alphabet = Bio.Alphabet.Alphabet()
elif alphabet in ['dna', 'rna', 'protein', 'nucleotide']:
alphabet = getattr(Bio.Alphabet, 'generic_{}'.format(alphabet))
else:
raise Exception(
"The alphabet is not recognized. Must be 'dna', 'rna', "
"'nucleotide', or 'protein'.")
# Build a list of records from a pandas DataFrame
if type(data) is pd.DataFrame:
seq_records = pandas_df_to_biopython_seqrecord(
data,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
)
# Build a record from a pandas Series
elif type(data) is pd.Series:
seq_records = pandas_series_to_biopython_seqrecord(
data,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
)
# Write to disk or return string
if filename is not None:
SeqIO.write(seq_records, filename, format=schema, **kwargs)
else:
return "".join([s.format(schema) for s in seq_records]) | General write function. Write phylopanda data to biopython format.
Parameters
----------
filename : str
File to write string to. If no filename is given, a string
will be returned.
sequence_col : str (default='sequence')
Sequence column name in DataFrame.
id_col : str (default='uid')
ID column name in DataFrame.
extra_data : list (optional)
Extra columns to include in the sequence description line. | https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/seqio/write.py#L144-L207 |
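A sketch of _write's string-returning path (same Biopython < 1.78 assumption): with filename=None the records are formatted and returned rather than written to disk.

import pandas as pd
from phylopandas.seqio.write import _write

df = pd.DataFrame({'uid': ['a', 'b'], 'sequence': ['ATGC', 'GGCC']})
fasta_text = _write(df, schema='fasta', alphabet='dna')  # returns a FASTA string
print(fasta_text)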
Zsailer/phylopandas | phylopandas/seqio/write.py | _write_method | def _write_method(schema):
"""Add a write method for named schema to a class.
"""
def method(
self,
filename=None,
schema=schema,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
**kwargs):
# Use generic write class to write data.
return _write(
self._data,
filename=filename,
schema=schema,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
**kwargs
)
# Update docs
method.__doc__ = _write_doc_template(schema)
return method | python | def _write_method(schema):
"""Add a write method for named schema to a class.
"""
def method(
self,
filename=None,
schema=schema,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
**kwargs):
# Use generic write class to write data.
return _write(
self._data,
filename=filename,
schema=schema,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
**kwargs
)
# Update docs
method.__doc__ = _write_doc_template(schema)
return method | Add a write method for named schema to a class. | https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/seqio/write.py#L209-L234 |
Zsailer/phylopandas | phylopandas/seqio/write.py | _write_function | def _write_function(schema):
"""Add a write method for named schema to a class.
"""
def func(
data,
filename=None,
schema=schema,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
**kwargs):
# Use generic write class to write data.
return _write(
data,
filename=filename,
schema=schema,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
**kwargs
)
# Update docs
func.__doc__ = _write_doc_template(schema)
return func | python | def _write_function(schema):
"""Add a write method for named schema to a class.
"""
def func(
data,
filename=None,
schema=schema,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
**kwargs):
# Use generic write class to write data.
return _write(
data,
filename=filename,
schema=schema,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
**kwargs
)
# Update docs
func.__doc__ = _write_doc_template(schema)
return func | Add a write function for named schema. | https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/seqio/write.py#L237-L262 |
Zsailer/phylopandas | phylopandas/seqio/read.py | _read | def _read(
filename,
schema,
seq_label='sequence',
alphabet=None,
use_uids=True,
**kwargs):
"""Use BioPython's sequence parsing module to convert any file format to
a Pandas DataFrame.
The resulting DataFrame has the following columns:
- id
- sequence
- description
- label
- uid (a random id string, added when use_uids=True)
"""
# Check Alphabet if given
if alphabet is None:
alphabet = Bio.Alphabet.Alphabet()
elif alphabet in ['dna', 'rna', 'protein', 'nucleotide']:
alphabet = getattr(Bio.Alphabet, 'generic_{}'.format(alphabet))
else:
raise Exception(
"The alphabet is not recognized. Must be 'dna', 'rna', "
"'nucleotide', or 'protein'.")
kwargs.update(alphabet=alphabet)
# Prepare DataFrame fields.
data = {
'id': [],
seq_label: [],
'description': [],
'label': []
}
if use_uids:
data['uid'] = []
# Parse the file using the requested schema.
for i, s in enumerate(SeqIO.parse(filename, format=schema, **kwargs)):
data['id'].append(s.id)
data[seq_label].append(str(s.seq))
data['description'].append(s.description)
data['label'].append(s.name)
if use_uids:
data['uid'].append(get_random_id(10))
# Port to DataFrame.
return pd.DataFrame(data) | python | def _read(
filename,
schema,
seq_label='sequence',
alphabet=None,
use_uids=True,
**kwargs):
"""Use BioPython's sequence parsing module to convert any file format to
a Pandas DataFrame.
The resulting DataFrame has the following columns:
- id
- sequence
- description
- label
- uid (a random id string, added when use_uids=True)
"""
# Check Alphabet if given
if alphabet is None:
alphabet = Bio.Alphabet.Alphabet()
elif alphabet in ['dna', 'rna', 'protein', 'nucleotide']:
alphabet = getattr(Bio.Alphabet, 'generic_{}'.format(alphabet))
else:
raise Exception(
"The alphabet is not recognized. Must be 'dna', 'rna', "
"'nucleotide', or 'protein'.")
kwargs.update(alphabet=alphabet)
# Prepare DataFrame fields.
data = {
'id': [],
seq_label: [],
'description': [],
'label': []
}
if use_uids:
data['uid'] = []
# Parse the file using the requested schema.
for i, s in enumerate(SeqIO.parse(filename, format=schema, **kwargs)):
data['id'].append(s.id)
data[seq_label].append(str(s.seq))
data['description'].append(s.description)
data['label'].append(s.name)
if use_uids:
data['uid'].append(get_random_id(10))
# Port to DataFrame.
return pd.DataFrame(data) | Use BioPython's sequence parsing module to convert any file format to
a Pandas DataFrame.
The resulting DataFrame has the following columns:
- id
- sequence
- description
- label
- uid (a random id string, added when use_uids=True) | https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/seqio/read.py#L37-L88 |
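Round-trip sketch under the same assumptions: _read parses any SeqIO-supported schema into the flat column layout listed above ('out.fasta' is the illustrative file from the earlier example).

from phylopandas.seqio.read import _read

df = _read('out.fasta', schema='fasta', alphabet='dna')
print(df[['id', 'sequence', 'uid']].head())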
Zsailer/phylopandas | phylopandas/seqio/read.py | _read_method | def _read_method(schema):
"""Add a write method for named schema to a class.
"""
def func(
self,
filename,
seq_label='sequence',
alphabet=None,
combine_on='uid',
use_uids=True,
**kwargs):
# Use the generic read function to parse the data.
df0 = self._data
df1 = _read(
filename=filename,
schema=schema,
seq_label=seq_label,
alphabet=alphabet,
use_uids=use_uids,
**kwargs
)
return df0.phylo.combine(df1, on=combine_on)
# Update docs
func.__doc__ = _read_doc_template(schema)
return func | python | def _read_method(schema):
"""Add a write method for named schema to a class.
"""
def func(
self,
filename,
seq_label='sequence',
alphabet=None,
combine_on='uid',
use_uids=True,
**kwargs):
# Use the generic read function to parse the data.
df0 = self._data
df1 = _read(
filename=filename,
schema=schema,
seq_label=seq_label,
alphabet=alphabet,
use_uids=use_uids,
**kwargs
)
return df0.phylo.combine(df1, on=combine_on)
# Update docs
func.__doc__ = _read_doc_template(schema)
return func | Add a read method for named schema to a class. | https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/seqio/read.py#L91-L116 |
Zsailer/phylopandas | phylopandas/seqio/read.py | _read_function | def _read_function(schema):
"""Add a write method for named schema to a class.
"""
def func(
filename,
seq_label='sequence',
alphabet=None,
use_uids=True,
**kwargs):
# Use the generic read function to parse the data.
return _read(
filename=filename,
schema=schema,
seq_label=seq_label,
alphabet=alphabet,
use_uids=use_uids,
**kwargs
)
# Update docs
func.__doc__ = _read_doc_template(schema)
return func | python | def _read_function(schema):
"""Add a write method for named schema to a class.
"""
def func(
filename,
seq_label='sequence',
alphabet=None,
use_uids=True,
**kwargs):
# Use the generic read function to parse the data.
return _read(
filename=filename,
schema=schema,
seq_label=seq_label,
alphabet=alphabet,
use_uids=use_uids,
**kwargs
)
# Update docs
func.__doc__ = _read_doc_template(schema)
return func | Add a read function for named schema. | https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/seqio/read.py#L119-L139 |
Zsailer/phylopandas | phylopandas/seqio/read.py | read_blast_xml | def read_blast_xml(filename, **kwargs):
"""Read BLAST XML format."""
# Read file.
with open(filename, 'r') as f:
blast_record = NCBIXML.read(f)
# Prepare DataFrame fields.
data = {'accession': [],
'hit_def': [],
'hit_id': [],
'title': [],
'length': [],
'e_value': [],
'sequence': []}
# Get alignments from blast result.
for i, s in enumerate(blast_record.alignments):
# Append each alignment's fields; plain assignment here would keep only
# the last hit and break the DataFrame constructor below.
data['accession'].append(s.accession)
data['hit_def'].append(s.hit_def)
data['hit_id'].append(s.hit_id)
data['title'].append(s.title)
data['length'].append(s.length)
data['e_value'].append(s.hsps[0].expect)
data['sequence'].append(s.hsps[0].sbjct)
# Port to DataFrame.
return pd.DataFrame(data) | python | def read_blast_xml(filename, **kwargs):
"""Read BLAST XML format."""
# Read file.
with open(filename, 'r') as f:
blast_record = NCBIXML.read(f)
# Prepare DataFrame fields.
data = {'accession': [],
'hit_def': [],
'hit_id': [],
'title': [],
'length': [],
'e_value': [],
'sequence': []}
# Get alignments from blast result.
for i, s in enumerate(blast_record.alignments):
# Append each alignment's fields; plain assignment here would keep only
# the last hit and break the DataFrame constructor below.
data['accession'].append(s.accession)
data['hit_def'].append(s.hit_def)
data['hit_id'].append(s.hit_id)
data['title'].append(s.title)
data['length'].append(s.length)
data['e_value'].append(s.hsps[0].expect)
data['sequence'].append(s.hsps[0].sbjct)
# Port to DataFrame.
return pd.DataFrame(data) | Read BLAST XML format. | https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/seqio/read.py#L154-L180 |
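Hypothetical call ('blast.xml' is an illustrative filename): NCBIXML.read expects a single-query BLAST XML report, and with the append fix above each alignment becomes one row.

from phylopandas.seqio.read import read_blast_xml

hits = read_blast_xml('blast.xml')
print(hits.sort_values('e_value')[['accession', 'e_value']].head())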
Zsailer/phylopandas | phylopandas/treeio/write.py | _pandas_df_to_dendropy_tree | def _pandas_df_to_dendropy_tree(
df,
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
):
"""Turn a phylopandas dataframe into a dendropy tree.
Parameters
----------
df : DataFrame
DataFrame containing tree data.
taxon_col : str (optional)
Column in dataframe to label the taxon. If None, the index will be used.
taxon_annotations : list
List of columns to attach as annotations on the tree taxa.
node_col : str (optional)
Column in dataframe to label the nodes. If None, the index will be used.
node_annotations : list
List of columns to attach as annotations on the nodes.
branch_lengths : bool
If True, includes branch lengths.
"""
if isinstance(taxon_col, str) is False:
raise Exception("taxon_col must be a string.")
if isinstance(node_col, str) is False:
raise Exception("taxon_col must be a string.")
# Construct a list of nodes from dataframe.
taxon_namespace = dendropy.TaxonNamespace()
nodes = {}
for idx in df.index:
# Get node data.
data = df.loc[idx]
# Get taxon for node (if leaf node).
taxon = None
if data['type'] == 'leaf':
taxon = dendropy.Taxon(label=data[taxon_col])
# Add annotations data.
for ann in taxon_annotations:
taxon.annotations.add_new(ann, data[ann])
taxon_namespace.add_taxon(taxon)
# Get label for node.
label = data[node_col]
# Get edge length.
edge_length = None
if branch_lengths is True:
edge_length = data['length']
# Build a node
n = dendropy.Node(
taxon=taxon,
label=label,
edge_length=edge_length
)
# Add node annotations
for ann in node_annotations:
n.annotations.add_new(ann, data[ann])
nodes[idx] = n
# Build branching pattern for nodes.
root = None
for idx, node in nodes.items():
# Get node data.
data = df.loc[idx]
# Get children nodes
children_idx = df[df['parent'] == data['id']].index
children_nodes = [nodes[i] for i in children_idx]
# Set child nodes
nodes[idx].set_child_nodes(children_nodes)
# Check if this is root.
if data['parent'] is None:
root = nodes[idx]
# Build tree.
tree = dendropy.Tree(
seed_node=root,
taxon_namespace=taxon_namespace
)
return tree | python | def _pandas_df_to_dendropy_tree(
df,
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
):
"""Turn a phylopandas dataframe into a dendropy tree.
Parameters
----------
df : DataFrame
DataFrame containing tree data.
taxon_col : str (optional)
Column in dataframe to label the taxon. If None, the index will be used.
taxon_annotations : list
List of columns to attach as annotations on the tree taxa.
node_col : str (optional)
Column in dataframe to label the nodes. If None, the index will be used.
node_annotations : list
List of columns to attach as annotations on the nodes.
branch_lengths : bool
If True, includes branch lengths.
"""
if isinstance(taxon_col, str) is False:
raise Exception("taxon_col must be a string.")
if isinstance(node_col, str) is False:
raise Exception("taxon_col must be a string.")
# Construct a list of nodes from dataframe.
taxon_namespace = dendropy.TaxonNamespace()
nodes = {}
for idx in df.index:
# Get node data.
data = df.loc[idx]
# Get taxon for node (if leaf node).
taxon = None
if data['type'] == 'leaf':
taxon = dendropy.Taxon(label=data[taxon_col])
# Add annotations data.
for ann in taxon_annotations:
taxon.annotations.add_new(ann, data[ann])
taxon_namespace.add_taxon(taxon)
# Get label for node.
label = data[node_col]
# Get edge length.
edge_length = None
if branch_lengths is True:
edge_length = data['length']
# Build a node
n = dendropy.Node(
taxon=taxon,
label=label,
edge_length=edge_length
)
# Add node annotations
for ann in node_annotations:
n.annotations.add_new(ann, data[ann])
nodes[idx] = n
# Build branching pattern for nodes.
root = None
for idx, node in nodes.items():
# Get node data.
data = df.loc[idx]
# Get children nodes
children_idx = df[df['parent'] == data['id']].index
children_nodes = [nodes[i] for i in children_idx]
# Set child nodes
nodes[idx].set_child_nodes(children_nodes)
# Check if this is root.
if data['parent'] is None:
root = nodes[idx]
# Build tree.
tree = dendropy.Tree(
seed_node=root,
taxon_namespace=taxon_namespace
)
return tree | Turn a phylopandas dataframe into a dendropy tree.
Parameters
----------
df : DataFrame
DataFrame containing tree data.
taxon_col : str (optional)
Column in dataframe to label the taxon. If None, the index will be used.
taxon_annotations : list
List of columns to attach as annotations on the tree taxa.
node_col : str (optional)
Column in dataframe to label the nodes. If None, the index will be used.
node_annotations : list
List of columns to attach as annotations on the nodes.
branch_lengths : bool
If True, includes branch lengths. | https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/treeio/write.py#L31-L126 |
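The body implies a fixed column layout: 'id', 'parent' (None marks the root), 'type' ('leaf' or internal), 'length', plus the label column. A minimal sketch; the three-row tree below is invented to match that layout, not taken from the phylopandas docs.

import pandas as pd
from phylopandas.treeio.write import _pandas_df_to_dendropy_tree

df = pd.DataFrame({
    'id':     [0,      1,      2],
    'parent': [None,   0,      0],       # None marks the root row
    'type':   ['node', 'leaf', 'leaf'],
    'length': [None,   0.1,    0.2],
    'uid':    ['root', 'A',    'B'],
})
tree = _pandas_df_to_dendropy_tree(df)
print(tree.as_string(schema='newick'))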
Zsailer/phylopandas | phylopandas/treeio/write.py | _write | def _write(
df,
filename=None,
schema='newick',
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
**kwargs
):
"""Write a phylopandas tree DataFrame to various formats.
Parameters
----------
df : DataFrame
DataFrame containing tree data.
filename : str
filepath to write out tree. If None, will return string.
schema : str
tree format to write out.
taxon_col : str (optional)
Column in dataframe to label the taxon. If None, the index will be used.
taxon_annotations : list
List of columns to attach as annotations on the tree taxa.
node_col : str (optional)
Column in dataframe to label the nodes. If None, the index will be used.
node_annotations : list
List of columns to attach as annotations on the nodes.
branch_lengths : bool
If True, includes branch lengths.
"""
tree = _pandas_df_to_dendropy_tree(
df,
taxon_col=taxon_col,
taxon_annotations=taxon_annotations,
node_col=node_col,
node_annotations=node_annotations,
branch_lengths=branch_lengths,
)
# Write out format
if filename is not None:
tree.write(path=filename, schema=schema, suppress_annotations=False, **kwargs)
else:
return tree.as_string(schema=schema) | python | def _write(
df,
filename=None,
schema='newick',
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
**kwargs
):
"""Write a phylopandas tree DataFrame to various formats.
Parameters
----------
df : DataFrame
DataFrame containing tree data.
filename : str
filepath to write out tree. If None, will return string.
schema : str
tree format to write out.
taxon_col : str (optional)
Column in dataframe to label the taxon. If None, the index will be used.
taxon_annotations : list
List of columns to attach as annotations on the tree taxa.
node_col : str (optional)
Column in dataframe to label the nodes. If None, the index will be used.
node_annotations : list
List of columns to attach as annotations on the nodes.
branch_lengths : bool
If True, includes branch lengths.
"""
tree = _pandas_df_to_dendropy_tree(
df,
taxon_col=taxon_col,
taxon_annotations=taxon_annotations,
node_col=node_col,
node_annotations=node_annotations,
branch_lengths=branch_lengths,
)
# Write out format
if filename is not None:
tree.write(path=filename, schema=schema, suppress_annotations=False, **kwargs)
else:
return tree.as_string(schema=schema) | Write a phylopandas tree DataFrame to various formats.
Parameters
----------
df : DataFrame
DataFrame containing tree data.
filename : str
filepath to write out tree. If None, will return string.
schema : str
tree format to write out.
taxon_col : str (optional)
Column in dataframe to label the taxon. If None, the index will be used.
taxon_annotations : list
List of columns to attach as annotations on the tree taxa.
node_col : str (optional)
Column in dataframe to label the nodes. If None, the index will be used.
node_annotations : list
List of columns to attach as annotations on the nodes.
branch_lengths : bool
If True, includes branch lengths. | https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/treeio/write.py#L129-L182 |
Zsailer/phylopandas | phylopandas/treeio/write.py | _write_method | def _write_method(schema):
"""Add a write method for named schema to a class.
"""
def method(
self,
filename=None,
schema=schema,
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
**kwargs):
# Use generic write class to write data.
return _write(
self._data,
filename=filename,
schema=schema,
taxon_col=taxon_col,
taxon_annotations=taxon_annotations,
node_col=node_col,
node_annotations=node_annotations,
branch_lengths=branch_lengths,
**kwargs
)
# Update docs
method.__doc__ = _write_doc_template(schema)
return method | python | def _write_method(schema):
"""Add a write method for named schema to a class.
"""
def method(
self,
filename=None,
schema=schema,
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
**kwargs):
# Use generic write class to write data.
return _write(
self._data,
filename=filename,
schema=schema,
taxon_col=taxon_col,
taxon_annotations=taxon_annotations,
node_col=node_col,
node_annotations=node_annotations,
branch_lengths=branch_lengths,
**kwargs
)
# Update docs
method.__doc__ = _write_doc_template(schema)
return method | Add a write method for named schema to a class. | https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/treeio/write.py#L185-L212 |
Zsailer/phylopandas | phylopandas/treeio/write.py | _write_function | def _write_function(schema):
"""Add a write method for named schema to a class.
"""
def func(
data,
filename=None,
schema=schema,
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
**kwargs):
# Use generic write class to write data.
return _write(
data,
filename=filename,
schema=schema,
taxon_col=taxon_col,
taxon_annotations=taxon_annotations,
node_col=node_col,
node_annotations=node_annotations,
branch_lengths=branch_lengths,
**kwargs
)
# Update docs
func.__doc__ = _write_doc_template(schema)
return func | python | def _write_function(schema):
"""Add a write method for named schema to a class.
"""
def func(
data,
filename=None,
schema=schema,
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
**kwargs):
# Use generic write class to write data.
return _write(
data,
filename=filename,
schema=schema,
taxon_col=taxon_col,
taxon_annotations=taxon_annotations,
node_col=node_col,
node_annotations=node_annotations,
branch_lengths=branch_lengths,
**kwargs
)
# Update docs
func.__doc__ = _write_doc_template(schema)
return func | Add a write function for named schema. | https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/treeio/write.py#L215-L242 |
Zsailer/phylopandas | phylopandas/utils.py | get_random_id | def get_random_id(length):
"""Generate a random, alpha-numerical id."""
alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
return ''.join(random.choice(alphabet) for _ in range(length)) | python | def get_random_id(length):
"""Generate a random, alpha-numerical id."""
alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
return ''.join(random.choice(alphabet) for _ in range(length)) | Generate a random, alpha-numerical id. | https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/utils.py#L4-L7 |
nicolas-van/pygreen | pygreen.py | PyGreen.set_folder | def set_folder(self, folder):
"""
Sets the folder where the files to serve are located.
"""
self.folder = folder
self.templates.directories[0] = folder
self.app.root_path = folder | python | def set_folder(self, folder):
"""
Sets the folder where the files to serve are located.
"""
self.folder = folder
self.templates.directories[0] = folder
self.app.root_path = folder | Sets the folder where the files to serve are located. | https://github.com/nicolas-van/pygreen/blob/41d433edb408f86278cf95269fabf3acc00c9119/pygreen.py#L81-L87 |
nicolas-van/pygreen | pygreen.py | PyGreen.run | def run(self, host='0.0.0.0', port=8080):
"""
Launch a development web server.
"""
waitress.serve(self.app, host=host, port=port) | python | def run(self, host='0.0.0.0', port=8080):
"""
Launch a development web server.
"""
waitress.serve(self.app, host=host, port=port) | Launch a development web server. | https://github.com/nicolas-van/pygreen/blob/41d433edb408f86278cf95269fabf3acc00c9119/pygreen.py#L89-L93 |
nicolas-van/pygreen | pygreen.py | PyGreen.get | def get(self, path):
"""
Get the content of a file, identified by its path relative to the folder configured
in PyGreen. If the file extension is one of the extensions that should be processed
through Mako, it will be processed.
"""
data = self.app.test_client().get("/%s" % path).data
return data | python | def get(self, path):
"""
Get the content of a file, identified by its path relative to the folder configured
in PyGreen. If the file extension is one of the extensions that should be processed
through Mako, it will be processed.
"""
data = self.app.test_client().get("/%s" % path).data
return data | Get the content of a file, identified by its path relative to the folder configured
in PyGreen. If the file extension is one of the extensions that should be processed
through Mako, it will be processed. | https://github.com/nicolas-van/pygreen/blob/41d433edb408f86278cf95269fabf3acc00c9119/pygreen.py#L95-L102 |
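Usage sketch (assuming the module is importable as pygreen and a 'site/index.html' exists): get() routes the path through the internal Flask test client, so Mako templates render exactly as they would when served.

from pygreen import PyGreen

pg = PyGreen()
pg.set_folder('site')
html = pg.get('index.html')  # bytes, rendered through Mako if the extension matches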
nicolas-van/pygreen | pygreen.py | PyGreen.gen_static | def gen_static(self, output_folder):
"""
Generates a complete static version of the web site. It will be stored in
output_folder.
"""
files = []
for l in self.file_listers:
files += l()
for f in files:
_logger.info("generating %s" % f)
content = self.get(f)
loc = os.path.join(output_folder, f)
d = os.path.dirname(loc)
if not os.path.exists(d):
os.makedirs(d)
with open(loc, "wb") as file_:
file_.write(content) | python | def gen_static(self, output_folder):
"""
Generates a complete static version of the web site. It will be stored in
output_folder.
"""
files = []
for l in self.file_listers:
files += l()
for f in files:
_logger.info("generating %s" % f)
content = self.get(f)
loc = os.path.join(output_folder, f)
d = os.path.dirname(loc)
if not os.path.exists(d):
os.makedirs(d)
with open(loc, "wb") as file_:
file_.write(content) | Generates a complete static version of the web site. It will stored in
output_folder. | https://github.com/nicolas-van/pygreen/blob/41d433edb408f86278cf95269fabf3acc00c9119/pygreen.py#L104-L120 |
nicolas-van/pygreen | pygreen.py | PyGreen.cli | def cli(self, cmd_args=None):
"""
The command line interface of PyGreen.
"""
logging.basicConfig(level=logging.INFO, format='%(message)s')
parser = argparse.ArgumentParser(description='PyGreen, micro web framework/static web site generator')
subparsers = parser.add_subparsers(dest='action')
parser_serve = subparsers.add_parser('serve', help='serve the web site')
parser_serve.add_argument('-p', '--port', type=int, default=8080, help='port to serve on')
parser_serve.add_argument('-f', '--folder', default=".", help='folder containing files to serve')
parser_serve.add_argument('-d', '--disable-templates', action='store_true', default=False, help='just serve static files, do not use Mako')
def serve():
if args.disable_templates:
self.template_exts = set([])
self.run(port=args.port)
parser_serve.set_defaults(func=serve)
parser_gen = subparsers.add_parser('gen', help='generate a static version of the site')
parser_gen.add_argument('output', help='folder to store the files')
parser_gen.add_argument('-f', '--folder', default=".", help='folder containing files to generate')
def gen():
self.gen_static(args.output)
parser_gen.set_defaults(func=gen)
args = parser.parse_args(cmd_args)
self.set_folder(args.folder)
print(parser.description)
print("")
args.func() | python | def cli(self, cmd_args=None):
"""
The command line interface of PyGreen.
"""
logging.basicConfig(level=logging.INFO, format='%(message)s')
parser = argparse.ArgumentParser(description='PyGreen, micro web framework/static web site generator')
subparsers = parser.add_subparsers(dest='action')
parser_serve = subparsers.add_parser('serve', help='serve the web site')
parser_serve.add_argument('-p', '--port', type=int, default=8080, help='port to serve on')
parser_serve.add_argument('-f', '--folder', default=".", help='folder containing files to serve')
parser_serve.add_argument('-d', '--disable-templates', action='store_true', default=False, help='just serve static files, do not use Mako')
def serve():
if args.disable_templates:
self.template_exts = set([])
self.run(port=args.port)
parser_serve.set_defaults(func=serve)
parser_gen = subparsers.add_parser('gen', help='generate a static version of the site')
parser_gen.add_argument('output', help='folder to store the files')
parser_gen.add_argument('-f', '--folder', default=".", help='folder containing files to generate')
def gen():
self.gen_static(args.output)
parser_gen.set_defaults(func=gen)
args = parser.parse_args(cmd_args)
self.set_folder(args.folder)
print(parser.description)
print("")
args.func() | The command line interface of PyGreen. | https://github.com/nicolas-van/pygreen/blob/41d433edb408f86278cf95269fabf3acc00c9119/pygreen.py#L125-L155 |
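Because cli() accepts cmd_args, the same entry point can be driven programmatically; a sketch mirroring the shell invocation (paths are illustrative):

from pygreen import PyGreen

# Equivalent to the shell command: pygreen gen _build -f site
PyGreen().cli(['gen', '_build', '-f', 'site'])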
internap/fake-switches | fake_switches/command_processing/base_command_processor.py | BaseCommandProcessor.init | def init(self, switch_configuration, terminal_controller, logger, piping_processor, *args):
"""
:type switch_configuration: fake_switches.switch_configuration.SwitchConfiguration
:type terminal_controller: fake_switches.terminal.TerminalController
:type logger: logging.Logger
:type piping_processor: fake_switches.command_processing.piping_processor_base.PipingProcessorBase
"""
self.switch_configuration = switch_configuration
self.terminal_controller = terminal_controller
self.logger = logger
self.piping_processor = piping_processor
self.sub_processor = None
self.continuing_to = None
self.is_done = False
self.replace_input = False
self.awaiting_keystroke = False | python | def init(self, switch_configuration, terminal_controller, logger, piping_processor, *args):
"""
:type switch_configuration: fake_switches.switch_configuration.SwitchConfiguration
:type terminal_controller: fake_switches.terminal.TerminalController
:type logger: logging.Logger
:type piping_processor: fake_switches.command_processing.piping_processor_base.PipingProcessorBase
"""
self.switch_configuration = switch_configuration
self.terminal_controller = terminal_controller
self.logger = logger
self.piping_processor = piping_processor
self.sub_processor = None
self.continuing_to = None
self.is_done = False
self.replace_input = False
self.awaiting_keystroke = False | :type switch_configuration: fake_switches.switch_configuration.SwitchConfiguration
:type terminal_controller: fake_switches.terminal.TerminalController
:type logger: logging.Logger
:type piping_processor: fake_switches.command_processing.piping_processor_base.PipingProcessorBase | https://github.com/internap/fake-switches/blob/ea5f77f0c73493497fb43ce59f3c75b52ce9bac8/fake_switches/command_processing/base_command_processor.py#L19-L35 |
urschrei/Circles | Circles/circles.py | _gccalc | def _gccalc(lon, lat, azimuth, maxdist=None):
"""
Original javascript on http://williams.best.vwh.net/gccalc.htm
Translated into python by Thomas Lecocq
This function is a black box, because trigonometry is difficult
"""
glat1 = lat * np.pi / 180.
glon1 = lon * np.pi / 180.
s = maxdist / 1.852243
faz = azimuth * np.pi / 180.
EPS = 0.00000000005
if ((np.abs(np.cos(glat1)) < EPS) and not (np.abs(np.sin(faz)) < EPS)):
raise CourseException("Only North-South courses are meaningful")
a = 6378.137 / 1.852243
f = 1 / 298.257223563
r = 1 - f
tu = r * np.tan(glat1)
sf = np.sin(faz)
cf = np.cos(faz)
if (cf == 0):
b = 0.
else:
b = 2. * np.arctan2 (tu, cf)
cu = 1. / np.sqrt(1 + tu * tu)
su = tu * cu
sa = cu * sf
c2a = 1 - sa * sa
x = 1. + np.sqrt(1. + c2a * (1. / (r * r) - 1.))
x = (x - 2.) / x
c = 1. - x
c = (x * x / 4. + 1.) / c
d = (0.375 * x * x - 1.) * x
tu = s / (r * a * c)
y = tu
c = y + 1
while (np.abs (y - c) > EPS):
sy = np.sin(y)
cy = np.cos(y)
cz = np.cos(b + y)
e = 2. * cz * cz - 1.
c = y
x = e * cy
y = e + e - 1.
y = (((sy * sy * 4. - 3.) * y * cz * d / 6. + x) *
d / 4. - cz) * sy * d + tu
b = cu * cy * cf - su * sy
c = r * np.sqrt(sa * sa + b * b)
d = su * cy + cu * sy * cf
glat2 = (np.arctan2(d, c) + np.pi) % (2*np.pi) - np.pi
c = cu * cy - su * sy * cf
x = np.arctan2(sy * sf, c)
c = ((-3. * c2a + 4.) * f + 4.) * c2a * f / 16.
d = ((e * cy * c + cz) * sy * c + y) * sa
glon2 = ((glon1 + x - (1. - c) * d * f + np.pi) % (2*np.pi)) - np.pi
baz = (np.arctan2(sa, b) + np.pi) % (2 * np.pi)
glon2 *= 180./np.pi
glat2 *= 180./np.pi
baz *= 180./np.pi
return (glon2, glat2, baz) | python | def _gccalc(lon, lat, azimuth, maxdist=None):
"""
Original javascript on http://williams.best.vwh.net/gccalc.htm
Translated into python by Thomas Lecocq
This function is a black box, because trigonometry is difficult
"""
glat1 = lat * np.pi / 180.
glon1 = lon * np.pi / 180.
s = maxdist / 1.852243
faz = azimuth * np.pi / 180.
EPS = 0.00000000005
if ((np.abs(np.cos(glat1)) < EPS) and not (np.abs(np.sin(faz)) < EPS)):
raise CourseException("Only North-South courses are meaningful")
a = 6378.137 / 1.852243
f = 1 / 298.257223563
r = 1 - f
tu = r * np.tan(glat1)
sf = np.sin(faz)
cf = np.cos(faz)
if (cf == 0):
b = 0.
else:
b = 2. * np.arctan2 (tu, cf)
cu = 1. / np.sqrt(1 + tu * tu)
su = tu * cu
sa = cu * sf
c2a = 1 - sa * sa
x = 1. + np.sqrt(1. + c2a * (1. / (r * r) - 1.))
x = (x - 2.) / x
c = 1. - x
c = (x * x / 4. + 1.) / c
d = (0.375 * x * x - 1.) * x
tu = s / (r * a * c)
y = tu
c = y + 1
while (np.abs (y - c) > EPS):
sy = np.sin(y)
cy = np.cos(y)
cz = np.cos(b + y)
e = 2. * cz * cz - 1.
c = y
x = e * cy
y = e + e - 1.
y = (((sy * sy * 4. - 3.) * y * cz * d / 6. + x) *
d / 4. - cz) * sy * d + tu
b = cu * cy * cf - su * sy
c = r * np.sqrt(sa * sa + b * b)
d = su * cy + cu * sy * cf
glat2 = (np.arctan2(d, c) + np.pi) % (2*np.pi) - np.pi
c = cu * cy - su * sy * cf
x = np.arctan2(sy * sf, c)
c = ((-3. * c2a + 4.) * f + 4.) * c2a * f / 16.
d = ((e * cy * c + cz) * sy * c + y) * sa
glon2 = ((glon1 + x - (1. - c) * d * f + np.pi) % (2*np.pi)) - np.pi
baz = (np.arctan2(sa, b) + np.pi) % (2 * np.pi)
glon2 *= 180./np.pi
glat2 *= 180./np.pi
baz *= 180./np.pi
return (glon2, glat2, baz) | Original javascript on http://williams.best.vwh.net/gccalc.htm
Translated into python by Thomas Lecocq
This function is a black box, because trigonometry is difficult | https://github.com/urschrei/Circles/blob/5aab401b470935e816a28d7ba817eb72f9344672/Circles/circles.py#L30-L95 |
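Despite the "black box" note, the interface is recoverable from the constants: 6378.137 is the WGS84 equatorial radius in km and 1.852243 a km-per-nautical-mile factor, so maxdist is in kilometres. A destination-point sketch:

from Circles.circles import _gccalc

# 100 km due east of (0E, 51.5N); returns destination lon/lat and back-azimuth.
lon2, lat2, back_az = _gccalc(0.0, 51.5, 90.0, maxdist=100.0)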
urschrei/Circles | Circles/circles.py | circle | def circle(m, centerlon, centerlat, radius, *args, **kwargs):
"""
Return projected (x, y) tuples outlining a "circle" in the chosen Basemap projection
Takes the following arguments:
m = basemap instance
centerlon = originating lon
centerlat = originating lat
radius = radius in kilometres
"""
glon1 = centerlon
glat1 = centerlat
X = []
Y = []
for azimuth in range(0, 360):
glon2, glat2, baz = _gccalc(glon1, glat1, azimuth, radius)
X.append(glon2)
Y.append(glat2)
X.append(X[0])
Y.append(Y[0])
proj_x, proj_y = m(X,Y)
return zip(proj_x, proj_y) | python | def circle(m, centerlon, centerlat, radius, *args, **kwargs):
"""
Return projected (x, y) tuples outlining a "circle" in the chosen Basemap projection
Takes the following arguments:
m = basemap instance
centerlon = originating lon
centerlat = originating lat
radius = radius in kilometres
"""
glon1 = centerlon
glat1 = centerlat
X = []
Y = []
for azimuth in range(0, 360):
glon2, glat2, baz = _gccalc(glon1, glat1, azimuth, radius)
X.append(glon2)
Y.append(glat2)
X.append(X[0])
Y.append(Y[0])
proj_x, proj_y = m(X,Y)
return zip(proj_x, proj_y) | Return projected (x, y) tuples outlining a "circle" in the chosen Basemap projection
Takes the following arguments:
m = basemap instance
centerlon = originating lon
centerlat = originating lat
radius = radius in kilometres | https://github.com/urschrei/Circles/blob/5aab401b470935e816a28d7ba817eb72f9344672/Circles/circles.py#L98-L120 |
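Plotting sketch (Basemap is legacy but matches the signature; assumed installed). circle() yields projected x/y pairs, so they feed straight into a matplotlib Polygon:

import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from mpl_toolkits.basemap import Basemap
from Circles.circles import circle

m = Basemap(projection='merc', llcrnrlon=-10, llcrnrlat=45,
            urcrnrlon=10, urcrnrlat=58, resolution='c')
m.drawcoastlines()
ring = list(circle(m, 0.0, 51.5, 250))  # 250 km ring around (0E, 51.5N)
plt.gca().add_patch(Polygon(ring, facecolor='none', edgecolor='red'))
plt.show()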
martinblech/mimerender | src/mimerender.py | register_mime | def register_mime(shortname, mime_types):
"""
Register a new mime type.
Usage example:
mimerender.register_mime('svg', ('application/x-svg', 'application/svg+xml',))
After this you can do:
@mimerender.mimerender(svg=render_svg)
def GET(...
...
"""
if shortname in _MIME_TYPES:
raise MimeRenderException('"%s" has already been registered'%shortname)
_MIME_TYPES[shortname] = mime_types | python | def register_mime(shortname, mime_types):
"""
Register a new mime type.
Usage example:
mimerender.register_mime('svg', ('application/x-svg', 'application/svg+xml',))
After this you can do:
@mimerender.mimerender(svg=render_svg)
def GET(...
...
"""
if shortname in _MIME_TYPES:
raise MimeRenderException('"%s" has already been registered'%shortname)
_MIME_TYPES[shortname] = mime_types | Register a new mime type.
Usage example:
mimerender.register_mime('svg', ('application/x-svg', 'application/svg+xml',))
After this you can do:
@mimerender.mimerender(svg=render_svg)
def GET(...
... | https://github.com/martinblech/mimerender/blob/8bcd05337402c93cf595ff26a8710ec3b1a19b04/src/mimerender.py#L63-L75 |
martinblech/mimerender | src/mimerender.py | wsgi_wrap | def wsgi_wrap(app):
'''
Wraps a standard wsgi application e.g.:
def app(environ, start_response)
It intercepts the start_response callback and grabs the results from it
so it can return the status, headers, and body as a tuple
'''
@wraps(app)
def wrapped(environ, start_response):
status_headers = [None, None]
def _start_response(status, headers):
status_headers[:] = [status, headers]
body = app(environ, _start_response)
ret = body, status_headers[0], status_headers[1]
return ret
return wrapped | python | def wsgi_wrap(app):
'''
Wraps a standard wsgi application e.g.:
def app(environ, start_response)
It intercepts the start_response callback and grabs the results from it
so it can return the status, headers, and body as a tuple
'''
@wraps(app)
def wrapped(environ, start_response):
status_headers = [None, None]
def _start_response(status, headers):
status_headers[:] = [status, headers]
body = app(environ, _start_response)
ret = body, status_headers[0], status_headers[1]
return ret
return wrapped | Wraps a standard wsgi application e.g.:
def app(environ, start_response)
It intercepts the start_response callback and grabs the results from it
so it can return the status, headers, and body as a tuple | https://github.com/martinblech/mimerender/blob/8bcd05337402c93cf595ff26a8710ec3b1a19b04/src/mimerender.py#L412-L427 |
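A sketch showing the shape change (assuming wsgi_wrap is importable from the mimerender module): the wrapped app ignores the caller's start_response and returns (body, status, headers) as a tuple.

from mimerender import wsgi_wrap

def app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello']

wrapped = wsgi_wrap(app)
body, status, headers = wrapped({}, lambda *a: None)  # outer start_response is unused
assert status == '200 OK'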
martinblech/mimerender | src/mimerender.py | WSGIMimeRender | def WSGIMimeRender(*args, **kwargs):
'''
A wrapper for _WSGIMimeRender that wraps the
inner callable with wsgi_wrap first.
'''
def wrapper(*args2, **kwargs2):
# take the function
def wrapped(f):
return _WSGIMimeRender(*args, **kwargs)(*args2, **kwargs2)(wsgi_wrap(f))
return wrapped
return wrapper | python | def WSGIMimeRender(*args, **kwargs):
'''
A wrapper for _WSGIMimeRender that wraps the
inner callable with wsgi_wrap first.
'''
def wrapper(*args2, **kwargs2):
# take the function
def wrapped(f):
return _WSGIMimeRender(*args, **kwargs)(*args2, **kwargs2)(wsgi_wrap(f))
return wrapped
return wrapper | A wrapper for _WSGIMimeRender that wraps the
inner callable with wsgi_wrap first. | https://github.com/martinblech/mimerender/blob/8bcd05337402c93cf595ff26a8710ec3b1a19b04/src/mimerender.py#L452-L462 |
martinblech/mimerender | src/mimerender.py | MimeRenderBase.map_exceptions | def map_exceptions(self, mapping, *args, **kwargs):
"""
Exception mapping helper decorator. Takes the same arguments as the
main decorator, plus `mapping`, which is a list of
`(exception_class, status_line)` pairs.
"""
@self.__call__(*args, **kwargs)
def helper(e, status):
return dict(exception=e), status
def wrap(target):
@wraps(target)
def wrapper(*args, **kwargs):
try:
return target(*args, **kwargs)
except BaseException as e:
for klass, status in mapping:
if isinstance(e, klass):
return helper(e, status)
raise
return wrapper
return wrap | python | def map_exceptions(self, mapping, *args, **kwargs):
"""
Exception mapping helper decorator. Takes the same arguments as the
main decorator, plus `mapping`, which is a list of
`(exception_class, status_line)` pairs.
"""
@self.__call__(*args, **kwargs)
def helper(e, status):
return dict(exception=e), status
def wrap(target):
@wraps(target)
def wrapper(*args, **kwargs):
try:
return target(*args, **kwargs)
except BaseException as e:
for klass, status in mapping:
if isinstance(e, klass):
return helper(e, status)
raise
return wrapper
return wrap | Exception mapping helper decorator. Takes the same arguments as the
main decorator, plus `mapping`, which is a list of
`(exception_class, status_line)` pairs. | https://github.com/martinblech/mimerender/blob/8bcd05337402c93cf595ff26a8710ec3b1a19b04/src/mimerender.py#L276-L297 |
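Sketch of the decorator inside a Flask view, assuming this mimerender version ships FlaskMimeRender as a concrete MimeRenderBase: mapped exceptions are rendered through the normal content-negotiation path as dict(exception=e) with the mapped status line.

import json
import mimerender

mr = mimerender.FlaskMimeRender()

@mr.map_exceptions(
    mapping=[(KeyError, '404 Not Found'), (ValueError, '400 Bad Request')],
    default='json',
    json=lambda exception: json.dumps({'error': str(exception)}),
)
def get_user(user_id):
    raise KeyError(user_id)  # rendered as JSON with a 404 status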
marrow/uri | uri/uri.py | URI.relative | def relative(self):
"""Identify if this URI is relative to some "current context".
For example, if the protocol is missing, it's protocol-relative. If the host is missing, it's host-relative, etc.
"""
scheme = self.scheme
if not scheme:
return True
return scheme.is_relative(self) | python | def relative(self):
"""Identify if this URI is relative to some "current context".
For example, if the protocol is missing, it's protocol-relative. If the host is missing, it's host-relative, etc.
"""
scheme = self.scheme
if not scheme:
return True
return scheme.is_relative(self) | Identify if this URI is relative to some "current context".
For example, if the protocol is missing, it's protocol-relative. If the host is missing, it's host-relative, etc. | https://github.com/marrow/uri/blob/1d8220f11111920cd625a0a32ba6a354edead825/uri/uri.py#L243-L254 |
marrow/uri | uri/uri.py | URI.resolve | def resolve(self, uri=None, **parts):
"""Attempt to resolve a new URI given an updated URI, partial or complete."""
if uri:
result = self.__class__(urljoin(str(self), str(uri)))
else:
result = self.__class__(self)
for part, value in parts.items():
if part not in self.__all_parts__:
raise TypeError("Unknown URI component: " + part)
setattr(result, part, value)
return result | python | def resolve(self, uri=None, **parts):
"""Attempt to resolve a new URI given an updated URI, partial or complete."""
if uri:
result = self.__class__(urljoin(str(self), str(uri)))
else:
result = self.__class__(self)
for part, value in parts.items():
if part not in self.__all_parts__:
raise TypeError("Unknown URI component: " + part)
setattr(result, part, value)
return result | Attempt to resolve a new URI given an updated URI, partial or complete. | https://github.com/marrow/uri/blob/1d8220f11111920cd625a0a32ba6a354edead825/uri/uri.py#L256-L270 |
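A sketch (the uri package exposes URI at top level): resolve() either joins a reference the urljoin way or overrides named components via keywords.

from uri import URI

base = URI('http://example.com/a/b/c?x=1')
print(base.resolve('../d'))          # http://example.com/a/d
print(base.resolve(scheme='https'))  # https://example.com/a/b/c?x=1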
pylover/khayyam | khayyam/jalali_date.py | JalaliDate.strptime | def strptime(cls, date_string, fmt):
"""
This is the opposite of :py:meth:`khayyam.JalaliDate.strftime`,
and is used to parse date strings into a date object.
`ValueError` is raised if the date_string and format can’t be
parsed by time.strptime() or if it returns a value which isn’t a time tuple. For a
complete list of formatting directives, see :doc:`/directives`.
:param date_string:
:param fmt:
:return: A :py:class:`khayyam.JalaliDate` corresponding to date_string, parsed according to format
:rtype: :py:class:`khayyam.JalaliDate`
"""
# noinspection PyUnresolvedReferences
result = cls.formatterfactory(fmt).parse(date_string)
result = {k: v for k, v in result.items() if k in ('year', 'month', 'day')}
return cls(**result) | python | def strptime(cls, date_string, fmt):
"""
This is the opposite of :py:meth:`khayyam.JalaliDate.strftime`,
and is used to parse date strings into a date object.
`ValueError` is raised if the date_string and format can’t be
parsed by time.strptime() or if it returns a value which isn’t a time tuple. For a
complete list of formatting directives, see :doc:`/directives`.
:param date_string:
:param fmt:
:return: A :py:class:`khayyam.JalaliDate` corresponding to date_string, parsed according to format
:rtype: :py:class:`khayyam.JalaiDate`
"""
# noinspection PyUnresolvedReferences
result = cls.formatterfactory(fmt).parse(date_string)
result = {k: v for k, v in result.items() if k in ('year', 'month', 'day')}
return cls(**result) | This is opposite of the :py:meth:`khayyam.JalaliDate.strftime`,
and used to parse date strings into date object.
`ValueError` is raised if the date_string and format can’t be
parsed by time.strptime() or if it returns a value which isn’t a time tuple. For a
complete list of formatting directives, see :doc:`/directives`.
:param date_string:
:param fmt:
:return: A :py:class:`khayyam.JalaliDate` corresponding to date_string, parsed according to format
:rtype: :py:class:`khayyam.JalaliDate` | https://github.com/pylover/khayyam/blob/7e3a30bb941f8dc8bad8bf9d3be2336fed04bb57/khayyam/jalali_date.py#L155-L173 |
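Parsing sketch using directives from the khayyam documentation referenced above:

from khayyam import JalaliDate

d = JalaliDate.strptime('1394-05-06', '%Y-%m-%d')
# Only year/month/day survive the filter above; any parsed time fields are dropped.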
pylover/khayyam | khayyam/jalali_date.py | JalaliDate.replace | def replace(self, year=None, month=None, day=None):
"""
Replaces the given arguments on this instance, and return a new instance.
:param year:
:param month:
:param day:
:return: A :py:class:`khayyam.JalaliDate` with the same attributes, except for those
attributes given new values by which keyword arguments are specified.
"""
return JalaliDate(
year if year else self.year,
month if month else self.month,
day if day else self.day
) | python | def replace(self, year=None, month=None, day=None):
"""
Replaces the given arguments on this instance, and return a new instance.
:param year:
:param month:
:param day:
:return: A :py:class:`khayyam.JalaliDate` with the same attributes, except for those
attributes given new values by which keyword arguments are specified.
"""
return JalaliDate(
year if year else self.year,
month if month else self.month,
day if day else self.day
) | Replaces the given arguments on this instance, and return a new instance.
:param year:
:param month:
:param day:
:return: A :py:class:`khayyam.JalaliDate` with the same attributes, except for those
attributes given new values by which keyword arguments are specified. | https://github.com/pylover/khayyam/blob/7e3a30bb941f8dc8bad8bf9d3be2336fed04bb57/khayyam/jalali_date.py#L210-L225 |
pylover/khayyam | khayyam/jalali_date.py | JalaliDate.todate | def todate(self):
"""
Calculates the corresponding day in the Gregorian calendar. This is the main use case of this library.
:return: Corresponding date in gregorian calendar.
:rtype: :py:class:`datetime.date`
"""
arr = get_gregorian_date_from_julian_day(self.tojulianday())
return datetime.date(int(arr[0]), int(arr[1]), int(arr[2])) | python | def todate(self):
"""
Calculates the corresponding day in the Gregorian calendar. This is the main use case of this library.
:return: Corresponding date in gregorian calendar.
:rtype: :py:class:`datetime.date`
"""
arr = get_gregorian_date_from_julian_day(self.tojulianday())
return datetime.date(int(arr[0]), int(arr[1]), int(arr[2])) | Calculates the corresponding day in the gregorian calendar. this is the main use case of this library.
:return: Corresponding date in gregorian calendar.
:rtype: :py:class:`datetime.date` | https://github.com/pylover/khayyam/blob/7e3a30bb941f8dc8bad8bf9d3be2336fed04bb57/khayyam/jalali_date.py#L227-L235 |
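Conversion sketch; the expected Gregorian date follows from Nowruz 1394 falling on 2015-03-21:

import datetime
from khayyam import JalaliDate

assert JalaliDate(1394, 4, 31).todate() == datetime.date(2015, 7, 22)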
pylover/khayyam | khayyam/jalali_date.py | JalaliDate.timetuple | def timetuple(self):
"""
It's equivalent to:
>>> time.struct_time((d.year, d.month, d.day, d.hour, d.minute, d.second, d.weekday(), dayofyear, [-1|1|0])) # doctest: +SKIP
time.struct_time(tm_year=2015, tm_mon=7, tm_mday=28, tm_hour=0, tm_min=0, tm_sec=0, tm_wday=1, tm_yday=209, tm_isdst=-1)
The tm_isdst flag of the result is set according to the dst() method: `tzinfo`
is None or dst() returns None, tm_isdst is set to -1; else if dst()
returns a non-zero value, tm_isdst is set to 1; else tm_isdst is set to 0.
:return: A :py:class:`time.struct_time` such as returned by time.localtime().
:rtype: :py:class:`time.struct_time`
"""
return time.struct_time((
self.year,
self.month,
self.day,
0,
0,
0,
self.weekday(),
self.dayofyear(),
-1
)) | python | def timetuple(self):
"""
It's equivalent to:
>>> time.struct_time((d.year, d.month, d.day, d.hour, d.minute, d.second, d.weekday(), dayofyear, [-1|1|0])) # doctest: +SKIP
time.struct_time(tm_year=2015, tm_mon=7, tm_mday=28, tm_hour=0, tm_min=0, tm_sec=0, tm_wday=1, tm_yday=209, tm_isdst=-1)
The tm_isdst flag of the result is set according to the dst() method: `tzinfo`
is None or dst() returns None, tm_isdst is set to -1; else if dst()
returns a non-zero value, tm_isdst is set to 1; else tm_isdst is set to 0.
:return: A :py:class:`time.struct_time` such as returned by time.localtime().
:rtype: :py:class:`time.struct_time`
"""
return time.struct_time((
self.year,
self.month,
self.day,
0,
0,
0,
self.weekday(),
self.dayofyear(),
-1
)) | It's equivalent to:
>>> time.struct_time((d.year, d.month, d.day, d.hour, d.minute, d.second, d.weekday(), dayofyear, [-1|1|0])) # doctest: +SKIP
time.struct_time(tm_year=2015, tm_mon=7, tm_mday=28, tm_hour=0, tm_min=0, tm_sec=0, tm_wday=1, tm_yday=209, tm_isdst=-1)
The tm_isdst flag of the result is set according to the dst() method: `tzinfo`
is None or dst() returns None, tm_isdst is set to -1; else if dst()
returns a non-zero value, tm_isdst is set to 1; else tm_isdst is set to 0.
:return: A :py:class:`time.struct_time` such as returned by time.localtime().
:rtype: :py:class:`time.struct_time` | https://github.com/pylover/khayyam/blob/7e3a30bb941f8dc8bad8bf9d3be2336fed04bb57/khayyam/jalali_date.py#L257-L283 |
pylover/khayyam | khayyam/jalali_date.py | JalaliDate.weekofyear | def weekofyear(self, first_day_of_week=SATURDAY):
"""weekofyear(first_day_of_week=SATURDAY)
:param first_day_of_week: One of the
:py:data:`khayyam.SATURDAY`,
:py:data:`khayyam.SUNDAY`,
:py:data:`khayyam.MONDAY`,
:py:data:`khayyam.TUESDAY`,
:py:data:`khayyam.WEDNESDAY`,
:py:data:`khayyam.THURSDAY` or
:py:data:`khayyam.FRIDAY`
:return: The week number of the year.
:rtype: int
"""
first_day_of_year = self.firstdayofyear()
days = (self - first_day_of_year).days
offset = first_day_of_week - first_day_of_year.weekday()
if offset < 0:
offset += 7
if days < offset:
return 0
return int((days - offset) / 7 + 1) | python | def weekofyear(self, first_day_of_week=SATURDAY):
"""weekofyear(first_day_of_week=SATURDAY)
:param first_day_of_week: One of the
:py:data:`khayyam.SATURDAY`,
:py:data:`khayyam.SUNDAY`,
:py:data:`khayyam.MONDAY`,
:py:data:`khayyam.TUESDAY`,
:py:data:`khayyam.WEDNESDAY`,
:py:data:`khayyam.THURSDAY` or
:py:data:`khayyam.FRIDAY`
:return: The week number of the year.
:rtype: int
"""
first_day_of_year = self.firstdayofyear()
days = (self - first_day_of_year).days
offset = first_day_of_week - first_day_of_year.weekday()
if offset < 0:
offset += 7
if days < offset:
return 0
return int((days - offset) / 7 + 1) | weekofyear(first_day_of_week=SATURDAY)
:param first_day_of_week: One of the
:py:data:`khayyam.SATURDAY`,
:py:data:`khayyam.SUNDAY`,
:py:data:`khayyam.MONDAY`,
:py:data:`khayyam.TUESDAY`,
:py:data:`khayyam.WEDNESDAY`,
:py:data:`khayyam.THURSDAY` or
:py:data:`khayyam.FRIDAY`
:return: The week number of the year.
:rtype: int | https://github.com/pylover/khayyam/blob/7e3a30bb941f8dc8bad8bf9d3be2336fed04bb57/khayyam/jalali_date.py#L447-L470 |
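Week-numbering sketch (MONDAY is exported alongside SATURDAY in khayyam's constants): days before the year's first occurrence of the chosen weekday land in week 0.

from khayyam import JalaliDate, MONDAY

d = JalaliDate(1394, 1, 15)
print(d.weekofyear())        # weeks counted from Saturday (default)
print(d.weekofyear(MONDAY))  # weeks counted from Monday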
pylover/khayyam | khayyam/jalali_datetime.py | JalaliDatetime.fromtimestamp | def fromtimestamp(cls, timestamp, tz=None):
"""
Creates a new :py:class:`khayyam.JalaliDatetime` instance from the given posix timestamp.
If optional argument tz is :py:obj:`None` or not specified, the timestamp is converted to
the platform's local date and time, and the returned datetime object is naive.
Else tz must be an instance of a class :py:class:`datetime.tzinfo` subclass,
and the timestamp is converted to tz's time zone. In this case the result is
equivalent to `tz.fromutc(JalaliDatetime.utcfromtimestamp(timestamp).replace(tzinfo=tz))`.
This method may raise `ValueError`, if the timestamp is out of the range of values
supported by the platform C localtime() or gmtime() functions.
It's common for this to be restricted to years in 1970 through 2038.
Note that on non-POSIX systems that include leap seconds in their
notion of a timestamp, leap seconds are ignored by fromtimestamp(), and then
it's possible to have two timestamps differing by a second that yield
identical datetime objects. See also :py:class:`khayyam.JalaliDatetime.utcfromtimestamp`.
.. testsetup:: api-datetime-fromtimestamp
import khayyam
from khayyam import JalaliDatetime
.. doctest:: api-datetime-fromtimestamp
>>> JalaliDatetime.fromtimestamp(1313132131.21232)
khayyam.JalaliDatetime(1390, 5, 21, 11, 25, 31, 212320, Jomeh)
:param timestamp: float the posix timestamp, i.e 1014324234.23423423.
:param tz: :py:class:`datetime.tzinfo` The optional timezone to get local date & time from the given timestamp.
:return: The local date and time corresponding to the POSIX timestamp, such as is returned by :py:func:`time.time()`.
:rtype: :py:class:`khayyam.JalaliDatetime`
"""
return cls(datetime.fromtimestamp(timestamp, tz=tz)) | python | def fromtimestamp(cls, timestamp, tz=None):
"""
Creates a new :py:class:`khayyam.JalaliDatetime` instance from the given posix timestamp.
If optional argument tz is :py:obj:`None` or not specified, the timestamp is converted to
the platform's local date and time, and the returned datetime object is naive.
Else tz must be an instance of a class :py:class:`datetime.tzinfo` subclass,
and the timestamp is converted to tz's time zone. In this case the result is
equivalent to `tz.fromutc(JalaliDatetime.utcfromtimestamp(timestamp).replace(tzinfo=tz))`.
This method may raise `ValueError`, if the timestamp is out of the range of values
supported by the platform C localtime() or gmtime() functions.
It's common for this to be restricted to years in 1970 through 2038.
Note that on non-POSIX systems that include leap seconds in their
notion of a timestamp, leap seconds are ignored by fromtimestamp(), and then
it's possible to have two timestamps differing by a second that yield
identical datetime objects. See also :py:class:`khayyam.JalaliDatetime.utcfromtimestamp`.
.. testsetup:: api-datetime-fromtimestamp
import khayyam
from khayyam import JalaliDatetime
.. doctest:: api-datetime-fromtimestamp
>>> JalaliDatetime.fromtimestamp(1313132131.21232)
khayyam.JalaliDatetime(1390, 5, 21, 11, 25, 31, 212320, Jomeh)
:param timestamp: float the posix timestamp, i.e 1014324234.23423423.
:param tz: :py:class:`datetime.tzinfo` The optional timezone to get local date & time from the given timestamp.
:return: The local date and time corresponding to the POSIX timestamp, such as is returned by :py:func:`time.time()`.
:rtype: :py:class:`khayyam.JalaliDatetime`
"""
return cls(datetime.fromtimestamp(timestamp, tz=tz)) | Creates a new :py:class:`khayyam.JalaliDatetime` instance from the given posix timestamp.
If optional argument tz is :py:obj:`None` or not specified, the timestamp is converted to
the platform's local date and time, and the returned datetime object is naive.
Else tz must be an instance of a class :py:class:`datetime.tzinfo` subclass,
and the timestamp is converted to tz's time zone. In this case the result is
equivalent to `tz.fromutc(JalaliDatetime.utcfromtimestamp(timestamp).replace(tzinfo=tz))`.
This method may raise `ValueError`, if the timestamp is out of the range of values
supported by the platform C localtime() or gmtime() functions.
It's common for this to be restricted to years in 1970 through 2038.
Note that on non-POSIX systems that include leap seconds in their
notion of a timestamp, leap seconds are ignored by fromtimestamp(), and then
it's possible to have two timestamps differing by a second that yield
identical datetime objects. See also :py:class:`khayyam.JalaliDatetime.utcfromtimestamp`.
.. testsetup:: api-datetime-fromtimestamp
import khayyam
from khayyam import JalaliDatetime
.. doctest:: api-datetime-fromtimestamp
>>> JalaliDatetime.fromtimestamp(1313132131.21232)
khayyam.JalaliDatetime(1390, 5, 21, 11, 25, 31, 212320, Jomeh)
:param timestamp: float, the POSIX timestamp, e.g. 1014324234.23423423.
:param tz: :py:class:`datetime.tzinfo` The optional timezone to get local date & time from the given timestamp.
:return: The local date and time corresponding to the POSIX timestamp, such as is returned by :py:func:`time.time()`.
:rtype: :py:class:`khayyam.JalaliDatetime` | https://github.com/pylover/khayyam/blob/7e3a30bb941f8dc8bad8bf9d3be2336fed04bb57/khayyam/jalali_datetime.py#L164-L199 |
pylover/khayyam | khayyam/jalali_datetime.py | JalaliDatetime.combine | def combine(cls, date, _time):
"""
Return a new jalali datetime object whose date members are equal to the given date object's, and whose _time
and tzinfo members are equal to the given _time object's.
For any datetime object d, d == datetime.combine(d.date(), d.timetz()). If date is a datetime object, its _time
and tzinfo members are ignored.
:param date: :py:class:`khayyam.JalaliDate` the date object to combine.
:param _time: :py:class:`datetime.time` the time object to combine.
:return: the combined jalali date & time object.
:rtype: :py:class:`khayyam.JalaliDatetime`
"""
if isinstance(date, (JalaliDatetime, khayyam.JalaliDate)):
date = date.todate()
return cls(datetime.combine(date, _time)) | python | def combine(cls, date, _time):
"""
Return a new jalali datetime object whose date members are equal to the given date object's, and whose _time
and tzinfo members are equal to the given _time object's.
For any datetime object d, d == datetime.combine(d.date(), d.timetz()). If date is a datetime object, its _time
and tzinfo members are ignored.
:param date: :py:class:`khayyam.JalaliDate` the date object to combine.
:param _time: :py:class:`datetime.time` the time object to combine.
:return: the combined jalali date & time object.
:rtype: :py:class:`khayyam.JalaliDatetime`
"""
if isinstance(date, (JalaliDatetime, khayyam.JalaliDate)):
date = date.todate()
return cls(datetime.combine(date, _time)) | Return a new jalali datetime object whose date members are equal to the given date object's, and whose _time
and tzinfo members are equal to the given _time object's.
For any datetime object d, d == datetime.combine(d.date(), d.timetz()). If date is a datetime object, its _time
and tzinfo members are ignored.
:param date: :py:class:`khayyam.JalaliDate` the date object to combine.
:param _time: :py:class:`datetime.time` the time object to combine.
:return: the combined jalali date & time object.
:rtype: :py:class:`khayyam.JalaliDatetime` | https://github.com/pylover/khayyam/blob/7e3a30bb941f8dc8bad8bf9d3be2336fed04bb57/khayyam/jalali_datetime.py#L227-L241 |
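A minimal usage sketch for combine, assuming the khayyam package is installed; the date and time values are arbitrary, and the printed formatting may vary by version.

from datetime import time

from khayyam import JalaliDate, JalaliDatetime

d = JalaliDate(1394, 4, 30)        # a Jalali calendar date (arbitrary example)
t = time(14, 30, 15)               # a stdlib time-of-day
dt = JalaliDatetime.combine(d, t)  # merged into a single JalaliDatetime
print(dt)                          # e.g. 1394-04-30 14:30:15.000000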
pylover/khayyam | khayyam/jalali_datetime.py | JalaliDatetime.todatetime | def todatetime(self):
"""
Converts the current instance to the Python built-in :py:class:`datetime.datetime` instance.
:return: the new :py:class:`datetime.datetime` instance representing the current date and time in the Gregorian calendar.
:rtype: :py:class:`datetime.datetime`
"""
arr = get_gregorian_date_from_julian_day(self.tojulianday())
return datetime(int(arr[0]), int(arr[1]), int(arr[2]), self.hour, self.minute, self.second, self.microsecond,
self.tzinfo) | python | def todatetime(self):
"""
Converts the current instance to the Python built-in :py:class:`datetime.datetime` instance.
:return: the new :py:class:`datetime.datetime` instance representing the current date and time in the Gregorian calendar.
:rtype: :py:class:`datetime.datetime`
"""
arr = get_gregorian_date_from_julian_day(self.tojulianday())
return datetime(int(arr[0]), int(arr[1]), int(arr[2]), self.hour, self.minute, self.second, self.microsecond,
self.tzinfo) | Converts the current instance to the Python built-in :py:class:`datetime.datetime` instance.
:return: the new :py:class:`datetime.datetime` instance representing the current date and time in the Gregorian calendar.
:rtype: :py:class:`datetime.datetime` | https://github.com/pylover/khayyam/blob/7e3a30bb941f8dc8bad8bf9d3be2336fed04bb57/khayyam/jalali_datetime.py#L265-L274 |
pylover/khayyam | khayyam/jalali_datetime.py | JalaliDatetime.date | def date(self):
"""
Return date object with same year, month and day.
:rtype: :py:class:`khayyam.JalaliDate`
"""
return khayyam.JalaliDate(self.year, self.month, self.day) | python | def date(self):
"""
Return date object with same year, month and day.
:rtype: :py:class:`khayyam.JalaliDate`
"""
return khayyam.JalaliDate(self.year, self.month, self.day) | Return date object with same year, month and day.
:rtype: :py:class:`khayyam.JalaliDate` | https://github.com/pylover/khayyam/blob/7e3a30bb941f8dc8bad8bf9d3be2336fed04bb57/khayyam/jalali_datetime.py#L276-L282 |
pylover/khayyam | khayyam/jalali_datetime.py | JalaliDatetime.replace | def replace(self, year=None, month=None, day=None, hour=None,
minute=None, second=None, microsecond=None, tzinfo=None):
"""
Return a :py:class:`khayyam.JalaliDatetime` instance with the same attributes, except for those attributes
given new values by whichever keyword arguments are specified. Note that tzinfo=None can be specified to create
a naive datetime from an aware datetime with no conversion of date and time data, i.e. without adjusting
the date and time based on tzinfo.
:param year: int
:param month: int
:param day: int
:param hour: int
:param minute: int
:param second: int
:param microsecond: int
:param tzinfo: :py:class:`datetime.tzinfo`
:rtype: :py:class:`khayyam.JalaliDatetime`
"""
year, month, day = self._validate(
year if year else self.year,
month if month else self.month,
day if day else self.day
)
result = JalaliDatetime(
year,
month,
day,
self.hour if hour is None else hour,
self.minute if minute is None else minute,
self.second if second is None else second,
self.microsecond if microsecond is None else microsecond,
tzinfo if tzinfo != self.tzinfo else self.tzinfo
)
return result | python | def replace(self, year=None, month=None, day=None, hour=None,
minute=None, second=None, microsecond=None, tzinfo=None):
"""
Return a :py:class:`khayyam.JalaliDatetime` instance with the same attributes, except for those attributes
given new values by whichever keyword arguments are specified. Note that tzinfo=None can be specified to create
a naive datetime from an aware datetime with no conversion of date and time data, i.e. without adjusting
the date and time based on tzinfo.
:param year: int
:param month: int
:param day: int
:param hour: int
:param minute: int
:param second: int
:param microsecond: int
:param tzinfo: :py:class:`datetime.tzinfo`
:rtype: :py:class:`khayyam.JalaliDatetime`
"""
year, month, day = self._validate(
year if year else self.year,
month if month else self.month,
day if day else self.day
)
result = JalaliDatetime(
year,
month,
day,
self.hour if hour is None else hour,
self.minute if minute is None else minute,
self.second if second is None else second,
self.microsecond if microsecond is None else microsecond,
tzinfo if tzinfo != self.tzinfo else self.tzinfo
)
return result | Return a :py:class:`khayyam.JalaliDatetime` instance with the same attributes, except for those attributes
given new values by whichever keyword arguments are specified. Note that tzinfo=None can be specified to create
a naive datetime from an aware datetime with no conversion of date and time data, i.e. without adjusting
the date and time based on tzinfo.
:param year: int
:param month: int
:param day: int
:param hour: int
:param minute: int
:param second: int
:param microsecond: int
:param tzinfo: :py:class:`datetime.tzinfo`
:rtype: :py:class:`khayyam.JalaliDatetime` | https://github.com/pylover/khayyam/blob/7e3a30bb941f8dc8bad8bf9d3be2336fed04bb57/khayyam/jalali_datetime.py#L302-L336 |
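A short usage sketch, assuming khayyam is installed. Note from the code above that the time fields are compared against None, so passing 0 works, while year/month/day use a truthiness check.

from khayyam import JalaliDatetime

dt = JalaliDatetime(1390, 5, 21, 11, 25, 31)
print(dt.replace(year=1395))         # only the year changes
print(dt.replace(hour=0, minute=0))  # zeroing time fields works (None check)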
pylover/khayyam | khayyam/jalali_datetime.py | JalaliDatetime.astimezone | def astimezone(self, tz):
"""
Return a :py:class:`khayyam.JalaliDatetime` object with new :py:meth:`khayyam.JalaliDatetime.tzinfo` attribute
tz, adjusting the date and time data so the result is the same UTC time as self, but in *tz*'s local time.
*tz* must be an instance of a :py:class:`datetime.tzinfo` subclass, and
its :py:meth:`datetime.tzinfo.utcoffset()` and :py:meth:`datetime.tzinfo.dst()` methods must not
return :py:obj:`None`. *self* must be aware (`self.tzinfo` must not be `None`, and `self.utcoffset()` must
not return `None`).
If `self.tzinfo` is `tz`, `self.astimezone(tz)` is equal to `self`: no adjustment of date or time data is
performed. Else the result is local time in time zone `tz`, representing the same UTC time as `self`:
after `astz = dt.astimezone(tz), astz - astz.utcoffset()` will usually have the same date and time data as
`dt - dt.utcoffset()`. The discussion of class :py:class:`datetime.tzinfo` explains the cases at Daylight
Saving Time transition boundaries where this cannot be achieved (an issue only if `tz` models both
standard and daylight time).
If you merely want to attach a time zone object `tz` to a datetime dt without adjustment of date and time data,
use `dt.replace(tzinfo=tz)`. If you merely want to remove the time zone object from an aware datetime dt
without conversion of date and time data, use `dt.replace(tzinfo=None)`.
Note that the default :py:meth:`datetime.tzinfo.fromutc()` method can be overridden in a
:py:class:`datetime.tzinfo` subclass to affect the result returned
by :py:meth:`khayyam.JalaliDatetime.astimezone()`. Ignoring error
cases, :py:meth:`khayyam.JalaliDatetime.astimezone()` acts like:
.. code-block:: python
:emphasize-lines: 3,5
def astimezone(self, tz): # doctest: +SKIP
if self.tzinfo is tz:
return self
if self.tzinfo:
utc = self - self.utcoffset()
else:
utc = self
return tz.fromutc(utc.replace(tzinfo=tz))
:param tz: :py:class:`datetime.tzinfo`
:rtype: :py:class:`khayyam.JalaliDatetime`
"""
if self.tzinfo is tz:
return self
if self.tzinfo:
utc = self - self.utcoffset()
else:
utc = self
return tz.fromutc(utc.replace(tzinfo=tz)) | python | def astimezone(self, tz):
"""
Return a :py:class:`khayyam.JalaliDatetime` object with new :py:meth:`khayyam.JalaliDatetime.tzinfo` attribute
tz, adjusting the date and time data so the result is the same UTC time as self, but in *tz*'s local time.
*tz* must be an instance of a :py:class:`datetime.tzinfo` subclass, and
its :py:meth:`datetime.tzinfo.utcoffset()` and :py:meth:`datetime.tzinfo.dst()` methods must not
return :py:obj:`None`. *self* must be aware (`self.tzinfo` must not be `None`, and `self.utcoffset()` must
not return `None`).
If `self.tzinfo` is `tz`, `self.astimezone(tz)` is equal to `self`: no adjustment of date or time data is
performed. Else the result is local time in time zone `tz`, representing the same UTC time as `self`:
after `astz = dt.astimezone(tz), astz - astz.utcoffset()` will usually have the same date and time data as
`dt - dt.utcoffset()`. The discussion of class :py:class:`datetime.tzinfo` explains the cases at Daylight
Saving Time transition boundaries where this cannot be achieved (an issue only if `tz` models both
standard and daylight time).
If you merely want to attach a time zone object `tz` to a datetime dt without adjustment of date and time data,
use `dt.replace(tzinfo=tz)`. If you merely want to remove the time zone object from an aware datetime dt
without conversion of date and time data, use `dt.replace(tzinfo=None)`.
Note that the default :py:meth:`datetime.tzinfo.fromutc()` method can be overridden in a
:py:class:`datetime.tzinfo` subclass to affect the result returned
by :py:meth:`khayyam.JalaliDatetime.astimezone()`. Ignoring error
cases, :py:meth:`khayyam.JalaliDatetime.astimezone()` acts like:
.. code-block:: python
:emphasize-lines: 3,5
def astimezone(self, tz): # doctest: +SKIP
if self.tzinfo is tz:
return self
if self.tzinfo:
utc = self - self.utcoffset()
else:
utc = self
return tz.fromutc(utc.replace(tzinfo=tz))
:param tz: :py:class:`datetime.tzinfo`
:rtype: :py:class:`khayyam.JalaliDatetime`
"""
if self.tzinfo is tz:
return self
if self.tzinfo:
utc = self - self.utcoffset()
else:
utc = self
return tz.fromutc(utc.replace(tzinfo=tz)) | Return a :py:class:`khayyam.JalaliDatetime` object with new :py:meth:`khayyam.JalaliDatetime.tzinfo` attribute
tz, adjusting the date and time data so the result is the same UTC time as self, but in *tz*'s local time.
*tz* must be an instance of a :py:class:`datetime.tzinfo` subclass, and
its :py:meth:`datetime.tzinfo.utcoffset()` and :py:meth:`datetime.tzinfo.dst()` methods must not
return :py:obj:`None`. *self* must be aware (`self.tzinfo` must not be `None`, and `self.utcoffset()` must
not return `None`).
If `self.tzinfo` is `tz`, `self.astimezone(tz)` is equal to `self`: no adjustment of date or time data is
performed. Else the result is local time in time zone `tz`, representing the same UTC time as `self`:
after `astz = dt.astimezone(tz), astz - astz.utcoffset()` will usually have the same date and time data as
`dt - dt.utcoffset()`. The discussion of class :py:class:`datetime.tzinfo` explains the cases at Daylight
Saving Time transition boundaries where this cannot be achieved (an issue only if `tz` models both
standard and daylight time).
If you merely want to attach a time zone object `tz` to a datetime dt without adjustment of date and time data,
use `dt.replace(tzinfo=tz)`. If you merely want to remove the time zone object from an aware datetime dt
without conversion of date and time data, use `dt.replace(tzinfo=None)`.
Note that the default :py:meth:`datetime.tzinfo.fromutc()` method can be overridden in a
:py:class:`datetime.tzinfo` subclass to affect the result returned
by :py:meth:`khayyam.JalaliDatetime.astimezone()`. Ignoring error
cases, :py:meth:`khayyam.JalaliDatetime.astimezone()` acts like:
.. code-block:: python
:emphasize-lines: 3,5
def astimezone(self, tz): # doctest: +SKIP
if self.tzinfo is tz:
return self
if self.tzinfo:
utc = self - self.utcoffset()
else:
utc = self
return tz.fromutc(utc.replace(tzinfo=tz))
:param tz: :py:class:`datetime.tzinfo`
:rtype: :py:class:`khayyam.JalaliDatetime` | https://github.com/pylover/khayyam/blob/7e3a30bb941f8dc8bad8bf9d3be2336fed04bb57/khayyam/jalali_datetime.py#L338-L387 |
pylover/khayyam | khayyam/jalali_datetime.py | JalaliDatetime.hour12 | def hour12(self):
"""
Return the hour value between `1-12`. Use :py:meth:`khayyam.JalaliDatetime.ampm()` or
:py:meth:`khayyam.JalaliDatetime.ampmascii()` to determine `ante meridiem` or `post meridiem`.
:rtype: int
"""
res = self.hour
if res > 12:
res -= 12
elif res == 0:
res = 12
return res | python | def hour12(self):
"""
Return the hour value between `1-12`. Use :py:meth:`khayyam.JalaliDatetime.ampm()` or
:py:meth:`khayyam.JalaliDatetime.ampmascii()` to determine `ante meridiem` or `post meridiem`.
:rtype: int
"""
res = self.hour
if res > 12:
res -= 12
elif res == 0:
res = 12
return res | Return The hour value between `1-12`. use :py:meth:`khayyam.JalaliDatetime.ampm()` or
:py:meth:`khayyam.JalaliDatetime.ampmascii()` to determine `ante meridiem` and or `post meridiem`
:rtype: int | https://github.com/pylover/khayyam/blob/7e3a30bb941f8dc8bad8bf9d3be2336fed04bb57/khayyam/jalali_datetime.py#L568-L580 |
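The 24-hour to 12-hour mapping restated standalone; the helper name is hypothetical, and in the installed package hour12 may be exposed as a property rather than a plain method.

def to_hour12(hour):  # hypothetical standalone restatement of hour12 above
    if hour > 12:
        return hour - 12
    if hour == 0:
        return 12
    return hour

print([to_hour12(h) for h in (0, 9, 12, 23)])  # [12, 9, 12, 11]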
mmcauliffe/Conch-sounds | conch/analysis/formants/lpc.py | lpc_ref | def lpc_ref(signal, order):
"""Compute the Linear Prediction Coefficients.
Return the order + 1 LPC coefficients for the signal. c = lpc(x, k) will
find the k+1 coefficients of a k-th order linear filter:
xp[n] = -c[1] * x[n-1] - ... - c[k] * x[n-k]
such that the sum of the squared error e[i] = xp[i] - x[i] is minimized.
Parameters
----------
signal: array_like
input signal
order : int
LPC order (the output will have order + 1 items)
Notes
-----
This is just for reference, as it is using the direct inversion of the
toeplitz matrix, which is really slow"""
if signal.ndim > 1:
raise ValueError("Array of rank > 1 not supported yet")
if order > signal.size:
raise ValueError("Input signal must have a lenght >= lpc order")
if order > 0:
p = order + 1
r = np.zeros(p, 'float32')
# Number of non zero values in autocorrelation one needs for p LPC
# coefficients
nx = np.min([p, signal.size])
x = np.correlate(signal, signal, 'full')
r[:nx] = x[signal.size - 1:signal.size + order]
phi = np.dot(sp.linalg.inv(sp.linalg.toeplitz(r[:-1])), -r[1:])
return np.concatenate(([1.], phi))
else:
return np.ones(1, dtype='float32') | python | def lpc_ref(signal, order):
"""Compute the Linear Prediction Coefficients.
Return the order + 1 LPC coefficients for the signal. c = lpc(x, k) will
find the k+1 coefficients of a k-th order linear filter:
xp[n] = -c[1] * x[n-1] - ... - c[k] * x[n-k]
such that the sum of the squared error e[i] = xp[i] - x[i] is minimized.
Parameters
----------
signal: array_like
input signal
order : int
LPC order (the output will have order + 1 items)
Notes
-----
This is just for reference, as it is using the direct inversion of the
toeplitz matrix, which is really slow"""
if signal.ndim > 1:
raise ValueError("Array of rank > 1 not supported yet")
if order > signal.size:
raise ValueError("Input signal must have a lenght >= lpc order")
if order > 0:
p = order + 1
r = np.zeros(p, 'float32')
# Number of non zero values in autocorrelation one needs for p LPC
# coefficients
nx = np.min([p, signal.size])
x = np.correlate(signal, signal, 'full')
r[:nx] = x[signal.size - 1:signal.size + order]
phi = np.dot(sp.linalg.inv(sp.linalg.toeplitz(r[:-1])), -r[1:])
return np.concatenate(([1.], phi))
else:
return np.ones(1, dtype='float32') | Compute the Linear Prediction Coefficients.
Return the order + 1 LPC coefficients for the signal. c = lpc(x, k) will
find the k+1 coefficients of a k-th order linear filter:
xp[n] = -c[1] * x[n-1] - ... - c[k] * x[n-k]
such that the sum of the squared error e[i] = xp[i] - x[i] is minimized.
Parameters
----------
signal: array_like
input signal
order : int
LPC order (the output will have order + 1 items)
Notes
-----
This is just for reference, as it is using the direct inversion of the
toeplitz matrix, which is really slow | https://github.com/mmcauliffe/Conch-sounds/blob/e05535fd08e4b0e47e37a77ef521d05eff1d6bc5/conch/analysis/formants/lpc.py#L13-L50 |
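A sanity-check sketch: synthesize an AR(2) signal and confirm that lpc_ref approximately recovers the generating coefficients. The import path follows the record above; the model coefficients are arbitrary.

import numpy as np
import scipy.signal as sps

from conch.analysis.formants.lpc import lpc_ref

rng = np.random.RandomState(0)
a_true = np.array([1.0, -0.9, 0.2])  # A(z) of an AR(2) model
x = sps.lfilter([1.0], a_true, rng.randn(4096)).astype('float32')
print(np.round(lpc_ref(x, 2), 3))    # approximately [1, -0.9, 0.2]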
mmcauliffe/Conch-sounds | conch/analysis/formants/lpc.py | levinson_1d | def levinson_1d(r, order):
"""Levinson-Durbin recursion, to efficiently solve symmetric linear systems
with toeplitz structure.
Parameters
----------
r : array-like
input array to invert (since the matrix is symmetric Toeplitz, the
corresponding pxp matrix is defined by p items only). Generally the
autocorrelation of the signal for linear prediction coefficients
estimation. The first item must be a non zero real.
Notes
-----
This implementation is in python, hence unsuitable for any serious
computation. Use it for educational and reference purposes only.
Levinson is a well-known algorithm to solve the Hermitian toeplitz
equation:
    [ R[0]    R*[1]   ...  R*[p-1] ] [ a[1] ]   [ -R[1] ]
    [ R[1]    R[0]    ...  R*[p-2] ] [ a[2] ] = [ -R[2] ]
    [  :        :     ...     :    ] [  :   ]   [   :   ]
    [ R[p-1]  R[p-2]  ...  R[0]    ] [ a[p] ]   [ -R[p] ]
with respect to a, where R* denotes the complex conjugate of R. Using the special symmetry
in the matrix, the inversion can be done in O(p^2) instead of O(p^3).
"""
r = np.atleast_1d(r)
if r.ndim > 1:
raise ValueError("Only rank 1 are supported for now.")
n = r.size
if n < 1:
raise ValueError("Cannot operate on empty array !")
elif order > n - 1:
raise ValueError("Order should be <= size-1")
if not np.isreal(r[0]):
raise ValueError("First item of input must be real.")
elif not np.isfinite(1 / r[0]):
raise ValueError("First item should be != 0")
# Estimated coefficients
a = np.empty(order + 1, 'float32')
# temporary array
t = np.empty(order + 1, 'float32')
# Reflection coefficients
k = np.empty(order, 'float32')
a[0] = 1.
e = r[0]
for i in range(1, order + 1):
acc = r[i]
for j in range(1, i):
acc += a[j] * r[i - j]
k[i - 1] = -acc / e
a[i] = k[i - 1]
for j in range(order):
t[j] = a[j]
for j in range(1, i):
a[j] += k[i - 1] * np.conj(t[i - j])
e *= 1 - k[i - 1] * np.conj(k[i - 1])
return a, e, k | python | def levinson_1d(r, order):
"""Levinson-Durbin recursion, to efficiently solve symmetric linear systems
with toeplitz structure.
Parameters
----------
r : array-like
input array to invert (since the matrix is symmetric Toeplitz, the
corresponding pxp matrix is defined by p items only). Generally the
autocorrelation of the signal for linear prediction coefficients
estimation. The first item must be a non zero real.
Notes
-----
This implementation is in python, hence unsuitable for any serious
computation. Use it for educational and reference purposes only.
Levinson is a well-known algorithm to solve the Hermitian toeplitz
equation:
    [ R[0]    R*[1]   ...  R*[p-1] ] [ a[1] ]   [ -R[1] ]
    [ R[1]    R[0]    ...  R*[p-2] ] [ a[2] ] = [ -R[2] ]
    [  :        :     ...     :    ] [  :   ]   [   :   ]
    [ R[p-1]  R[p-2]  ...  R[0]    ] [ a[p] ]   [ -R[p] ]
with respect to a, where R* denotes the complex conjugate of R. Using the special symmetry
in the matrix, the inversion can be done in O(p^2) instead of O(p^3).
"""
r = np.atleast_1d(r)
if r.ndim > 1:
raise ValueError("Only rank 1 are supported for now.")
n = r.size
if n < 1:
raise ValueError("Cannot operate on empty array !")
elif order > n - 1:
raise ValueError("Order should be <= size-1")
if not np.isreal(r[0]):
raise ValueError("First item of input must be real.")
elif not np.isfinite(1 / r[0]):
raise ValueError("First item should be != 0")
# Estimated coefficients
a = np.empty(order + 1, 'float32')
# temporary array
t = np.empty(order + 1, 'float32')
# Reflection coefficients
k = np.empty(order, 'float32')
a[0] = 1.
e = r[0]
for i in range(1, order + 1):
acc = r[i]
for j in range(1, i):
acc += a[j] * r[i - j]
k[i - 1] = -acc / e
a[i] = k[i - 1]
for j in range(order):
t[j] = a[j]
for j in range(1, i):
a[j] += k[i - 1] * np.conj(t[i - j])
e *= 1 - k[i - 1] * np.conj(k[i - 1])
return a, e, k | Levinson-Durbin recursion, to efficiently solve symmetric linear systems
with toeplitz structure.
Parameters
----------
r : array-like
input array to invert (since the matrix is symmetric Toeplitz, the
corresponding pxp matrix is defined by p items only). Generally the
autocorrelation of the signal for linear prediction coefficients
estimation. The first item must be a non zero real.
Notes
-----
This implementation is in python, hence unsuitable for any serious
computation. Use it for educational and reference purposes only.
Levinson is a well-known algorithm to solve the Hermitian toeplitz
equation:
    [ R[0]    R*[1]   ...  R*[p-1] ] [ a[1] ]   [ -R[1] ]
    [ R[1]    R[0]    ...  R*[p-2] ] [ a[2] ] = [ -R[2] ]
    [  :        :     ...     :    ] [  :   ]   [   :   ]
    [ R[p-1]  R[p-2]  ...  R[0]    ] [ a[p] ]   [ -R[p] ]
with respect to a, where R* denotes the complex conjugate of R. Using the special symmetry
in the matrix, the inversion can be done in O(p^2) instead of O(p^3). | https://github.com/mmcauliffe/Conch-sounds/blob/e05535fd08e4b0e47e37a77ef521d05eff1d6bc5/conch/analysis/formants/lpc.py#L54-L123 |
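A cross-check sketch: the Levinson recursion should agree with a direct Toeplitz solve; the autocorrelation values below are arbitrary.

import numpy as np
from scipy.linalg import solve, toeplitz

from conch.analysis.formants.lpc import levinson_1d

r = np.array([2.0, 1.0, 0.5, 0.25], dtype='float32')
a, e, k = levinson_1d(r, 3)
direct = solve(toeplitz(r[:-1]), -r[1:])      # O(p^3) reference solution
print(np.allclose(a[1:], direct, atol=1e-5))  # True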
mmcauliffe/Conch-sounds | conch/analysis/formants/lpc.py | acorr_lpc | def acorr_lpc(x, axis=-1):
"""Compute autocorrelation of x along the given axis.
This computes the biased autocorrelation estimator (divided by the size of the
input signal).
Notes
-----
The reason why we do not use acorr directly is speed."""
if not np.isrealobj(x):
raise ValueError("Complex input not supported yet")
maxlag = x.shape[axis]
nfft = int(2 ** nextpow2(2 * maxlag - 1))
if axis != -1:
x = np.swapaxes(x, -1, axis)
a = _acorr_last_axis(x, nfft, maxlag)
if axis != -1:
a = np.swapaxes(a, -1, axis)
return a | python | def acorr_lpc(x, axis=-1):
"""Compute autocorrelation of x along the given axis.
This computes the biased autocorrelation estimator (divided by the size of the
input signal).
Notes
-----
The reason why we do not use acorr directly is speed."""
if not np.isrealobj(x):
raise ValueError("Complex input not supported yet")
maxlag = x.shape[axis]
nfft = int(2 ** nextpow2(2 * maxlag - 1))
if axis != -1:
x = np.swapaxes(x, -1, axis)
a = _acorr_last_axis(x, nfft, maxlag)
if axis != -1:
a = np.swapaxes(a, -1, axis)
return a | Compute autocorrelation of x along the given axis.
This computes the biased autocorrelation estimator (divided by the size of the
input signal).
Notes
-----
The reason why we do not use acorr directly is speed. | https://github.com/mmcauliffe/Conch-sounds/blob/e05535fd08e4b0e47e37a77ef521d05eff1d6bc5/conch/analysis/formants/lpc.py#L133-L153 |
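A small check implied by the docstring: with the biased (divide-by-n) normalization it promises, lag 0 of the estimator equals the mean signal power.

import numpy as np

from conch.analysis.formants.lpc import acorr_lpc

x = np.random.RandomState(1).randn(64).astype('float32')
print(np.isclose(acorr_lpc(x)[0], np.mean(x ** 2), atol=1e-4))  # True if normalized as documented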
mmcauliffe/Conch-sounds | conch/analysis/formants/lpc.py | lpc | def lpc(signal, order, axis=-1):
"""Compute the Linear Prediction Coefficients.
Return the order + 1 LPC coefficients for the signal. c = lpc(x, k) will
find the k+1 coefficients of a k-th order linear filter:
xp[n] = -c[1] * x[n-1] - ... - c[k] * x[n-k]
such that the sum of the squared error e[i] = xp[i] - x[i] is minimized.
Parameters
----------
signal: array_like
input signal
order : int
LPC order (the output will have order + 1 items)
Returns
-------
a : array-like
the solution of the inversion.
e : array-like
the prediction error.
k : array-like
reflection coefficients.
Notes
-----
This uses Levinson-Durbin recursion for the autocorrelation matrix
inversion, and fft for the autocorrelation computation.
For small order, particularly if order << signal size, direct computation
of the autocorrelation is faster: use levinson and correlate in this case."""
n = signal.shape[axis]
if order > n:
raise ValueError("Input signal must have length >= order")
r = acorr_lpc(signal, axis)
return levinson_1d(r, order) | python | def lpc(signal, order, axis=-1):
"""Compute the Linear Prediction Coefficients.
Return the order + 1 LPC coefficients for the signal. c = lpc(x, k) will
find the k+1 coefficients of a k-th order linear filter:
xp[n] = -c[1] * x[n-1] - ... - c[k] * x[n-k]
such that the sum of the squared error e[i] = xp[i] - x[i] is minimized.
Parameters
----------
signal: array_like
input signal
order : int
LPC order (the output will have order + 1 items)
Returns
-------
a : array-like
the solution of the inversion.
e : array-like
the prediction error.
k : array-like
reflection coefficients.
Notes
-----
This uses Levinson-Durbin recursion for the autocorrelation matrix
inversion, and fft for the autocorrelation computation.
For small order, particularly if order << signal size, direct computation
of the autocorrelation is faster: use levinson and correlate in this case."""
n = signal.shape[axis]
if order > n:
raise ValueError("Input signal must have length >= order")
r = acorr_lpc(signal, axis)
return levinson_1d(r, order) | Compute the Linear Prediction Coefficients.
Return the order + 1 LPC coefficients for the signal. c = lpc(x, k) will
find the k+1 coefficients of a k-th order linear filter:
xp[n] = -c[1] * x[n-1] - ... - c[k] * x[n-k]
such that the sum of the squared error e[i] = xp[i] - x[i] is minimized.
Parameters
----------
signal: array_like
input signal
order : int
LPC order (the output will have order + 1 items)
Returns
-------
a : array-like
the solution of the inversion.
e : array-like
the prediction error.
k : array-like
reflection coefficients.
Notes
-----
This uses Levinson-Durbin recursion for the autocorrelation matrix
inversion, and fft for the autocorrelation computation.
For small order, particularly if order << signal size, direct computation
of the autocorrelation is faster: use levinson and correlate in this case. | https://github.com/mmcauliffe/Conch-sounds/blob/e05535fd08e4b0e47e37a77ef521d05eff1d6bc5/conch/analysis/formants/lpc.py#L157-L195 |
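Since uniformly scaling the autocorrelation does not change the Levinson solution, the fast FFT/Levinson path should match the slow reference up to float32 round-off; a hedged sketch:

import numpy as np

from conch.analysis.formants.lpc import lpc, lpc_ref

x = np.random.RandomState(2).randn(512).astype('float32')
a_fast, e, k = lpc(x, 10)
print(np.allclose(a_fast, lpc_ref(x, 10), atol=1e-3))  # True (float32 precision)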
mmcauliffe/Conch-sounds | conch/main.py | acoustic_similarity_mapping | def acoustic_similarity_mapping(path_mapping, analysis_function, distance_function, stop_check=None, call_back=None, multiprocessing=True):
"""Takes in an explicit mapping of full paths to .wav files to have
acoustic similarity computed.
Parameters
----------
path_mapping : iterable of iterables
Explicit mapping of full paths of .wav files, in the form of a
list of tuples to be compared.
Returns
-------
dict
Returns a list of tuples corresponding to the `path_mapping` input,
with a new final element in the tuple being the similarity/distance
score for that mapping.
"""
num_cores = int((3 * cpu_count()) / 4)
segments = set()
for x in path_mapping:
segments.update(x)
if multiprocessing:
cache = generate_cache_mp(segments, analysis_function, num_cores, call_back, stop_check)
asim = calculate_distances_mp(path_mapping, cache, distance_function, num_cores, call_back, stop_check)
else:
cache = generate_cache_th(segments, analysis_function, num_cores, call_back, stop_check)
asim = calculate_distances_th(path_mapping, cache, distance_function, num_cores, call_back, stop_check)
return asim | python | def acoustic_similarity_mapping(path_mapping, analysis_function, distance_function, stop_check=None, call_back=None, multiprocessing=True):
"""Takes in an explicit mapping of full paths to .wav files to have
acoustic similarity computed.
Parameters
----------
path_mapping : iterable of iterables
Explicit mapping of full paths of .wav files, in the form of a
list of tuples to be compared.
Returns
-------
dict
Returns a list of tuples corresponding to the `path_mapping` input,
with a new final element in the tuple being the similarity/distance
score for that mapping.
"""
num_cores = int((3 * cpu_count()) / 4)
segments = set()
for x in path_mapping:
segments.update(x)
if multiprocessing:
cache = generate_cache_mp(segments, analysis_function, num_cores, call_back, stop_check)
asim = calculate_distances_mp(path_mapping, cache, distance_function, num_cores, call_back, stop_check)
else:
cache = generate_cache_th(segments, analysis_function, num_cores, call_back, stop_check)
asim = calculate_distances_th(path_mapping, cache, distance_function, num_cores, call_back, stop_check)
return asim | Takes in an explicit mapping of full paths to .wav files to have
acoustic similarity computed.
Parameters
----------
path_mapping : iterable of iterables
Explicit mapping of full paths of .wav files, in the form of a
list of tuples to be compared.
Returns
-------
dict
Returns a list of tuples corresponding to the `path_mapping` input,
with a new final element in the tuple being the similarity/distance
score for that mapping. | https://github.com/mmcauliffe/Conch-sounds/blob/e05535fd08e4b0e47e37a77ef521d05eff1d6bc5/conch/main.py#L11-L41 |
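A hedged usage sketch. The calling convention of the analysis callable is an assumption (one wav path in, a frames-by-features array out), the file paths are placeholders, and the toy RMS extractor is illustrative only; it is paired here with the package's dtw_distance.

import numpy as np
from scipy.io import wavfile

from conch.distance.dtw import dtw_distance
from conch.main import acoustic_similarity_mapping

def toy_analysis(path):                    # placeholder analysis function
    sr, sig = wavfile.read(path)
    frame = int(sr * 0.01)                 # 10 ms frames
    n = len(sig) // frame
    frames = sig[:n * frame].astype(float).reshape(n, frame)
    return np.sqrt((frames ** 2).mean(axis=1, keepdims=True))  # frame RMS

mapping = [('a.wav', 'b.wav')]             # placeholder paths
result = acoustic_similarity_mapping(mapping, toy_analysis, dtw_distance,
                                     multiprocessing=False)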
mmcauliffe/Conch-sounds | conch/main.py | axb_mapping | def axb_mapping(path_mapping, analysis_function, distance_function, stop_check=None, call_back=None, multiprocessing=True):
"""Takes in an explicit mapping of full paths to .wav files to have
acoustic similarity computed.
Parameters
----------
path_mapping : iterable of iterables
Explicit mapping of full paths of .wav files, in the form of a
list of tuples to be compared.
Returns
-------
dict
Returns a list of tuples corresponding to the `path_mapping` input,
with a new final element in the tuple being the similarity/distance
score for that mapping.
"""
num_cores = int((3 * cpu_count()) / 4)
segments = set()
for x in path_mapping:
segments.update(x)
if multiprocessing:
cache = generate_cache_mp(segments, analysis_function, num_cores, call_back, stop_check)
asim = calculate_axb_ratio_mp(path_mapping, cache, distance_function, num_cores, call_back, stop_check)
else:
cache = generate_cache_th(segments, analysis_function, num_cores, call_back, stop_check)
asim = calculate_axb_ratio_th(path_mapping, cache, distance_function, num_cores, call_back, stop_check)
return asim | python | def axb_mapping(path_mapping, analysis_function, distance_function, stop_check=None, call_back=None, multiprocessing=True):
"""Takes in an explicit mapping of full paths to .wav files to have
acoustic similarity computed.
Parameters
----------
path_mapping : iterable of iterables
Explicit mapping of full paths of .wav files, in the form of a
list of tuples to be compared.
Returns
-------
dict
Returns a list of tuples corresponding to the `path_mapping` input,
with a new final element in the tuple being the similarity/distance
score for that mapping.
"""
num_cores = int((3 * cpu_count()) / 4)
segments = set()
for x in path_mapping:
segments.update(x)
if multiprocessing:
cache = generate_cache_mp(segments, analysis_function, num_cores, call_back, stop_check)
asim = calculate_axb_ratio_mp(path_mapping, cache, distance_function, num_cores, call_back, stop_check)
else:
cache = generate_cache_th(segments, analysis_function, num_cores, call_back, stop_check)
asim = calculate_axb_ratio_th(path_mapping, cache, distance_function, num_cores, call_back, stop_check)
return asim | Takes in an explicit mapping of full paths to .wav files to have
acoustic similarity computed.
Parameters
----------
path_mapping : iterable of iterables
Explicit mapping of full paths of .wav files, in the form of a
list of tuples to be compared.
Returns
-------
dict
Returns a list of tuples corresponding to the `path_mapping` input,
with a new final element in the tuple being the similarity/distance
score for that mapping. | https://github.com/mmcauliffe/Conch-sounds/blob/e05535fd08e4b0e47e37a77ef521d05eff1d6bc5/conch/main.py#L43-L73 |
mmcauliffe/Conch-sounds | conch/main.py | acoustic_similarity_directories | def acoustic_similarity_directories(directories, analysis_function, distance_function, stop_check=None, call_back=None, multiprocessing=True):
"""
Analyze many directories.
Parameters
----------
directories : list of str
List of fully specified paths to the directories to be analyzed
"""
files = []
if call_back is not None:
call_back('Mapping directories...')
call_back(0, len(directories))
cur = 0
for d in directories:
if not os.path.isdir(d):
continue
if stop_check is not None and stop_check():
return
if call_back is not None:
cur += 1
if cur % 3 == 0:
call_back(cur)
files += [os.path.join(d, x) for x in os.listdir(d) if x.lower().endswith('.wav')]
if len(files) == 0:
raise (ConchError("The directories specified do not contain any wav files"))
if call_back is not None:
call_back('Mapping directories...')
call_back(0, len(files) * len(files))
cur = 0
path_mapping = list()
for x in files:
for y in files:
if stop_check is not None and stop_check():
return
if call_back is not None:
cur += 1
if cur % 20 == 0:
call_back(cur)
if not x.lower().endswith('.wav'):
continue
if not y.lower().endswith('.wav'):
continue
if x == y:
continue
path_mapping.append((x, y))
result = acoustic_similarity_mapping(path_mapping, analysis_function, distance_function, stop_check, call_back, multiprocessing)
return result | python | def acoustic_similarity_directories(directories, analysis_function, distance_function, stop_check=None, call_back=None, multiprocessing=True):
"""
Analyze many directories.
Parameters
----------
directories : list of str
List of fully specified paths to the directories to be analyzed
"""
files = []
if call_back is not None:
call_back('Mapping directories...')
call_back(0, len(directories))
cur = 0
for d in directories:
if not os.path.isdir(d):
continue
if stop_check is not None and stop_check():
return
if call_back is not None:
cur += 1
if cur % 3 == 0:
call_back(cur)
files += [os.path.join(d, x) for x in os.listdir(d) if x.lower().endswith('.wav')]
if len(files) == 0:
raise (ConchError("The directories specified do not contain any wav files"))
if call_back is not None:
call_back('Mapping directories...')
call_back(0, len(files) * len(files))
cur = 0
path_mapping = list()
for x in files:
for y in files:
if stop_check is not None and stop_check():
return
if call_back is not None:
cur += 1
if cur % 20 == 0:
call_back(cur)
if not x.lower().endswith('.wav'):
continue
if not y.lower().endswith('.wav'):
continue
if x == y:
continue
path_mapping.append((x, y))
result = acoustic_similarity_mapping(path_mapping, analysis_function, distance_function, stop_check, call_back, multiprocessing)
return result | Analyze many directories.
Parameters
----------
directories : list of str
List of fully specified paths to the directories to be analyzed | https://github.com/mmcauliffe/Conch-sounds/blob/e05535fd08e4b0e47e37a77ef521d05eff1d6bc5/conch/main.py#L76-L130 |
mmcauliffe/Conch-sounds | conch/distance/dtw.py | dtw_distance | def dtw_distance(rep_one, rep_two, norm=True):
"""Computes the distance between two representations with the same
number of filters using Dynamic Time Warping.
Parameters
----------
rep_one : 2D array
First representation to compare. First dimension is time in frames
or samples and second dimension is the features.
rep_two : 2D array
Second representation to compare. First dimension is time in frames
or samples and second dimension is the features.
Returns
-------
float
Distance of dynamically time warping `rep_one` to `rep_two`.
"""
if not isinstance(rep_one, np.ndarray):
rep_one = rep_one.to_array()
if not isinstance(rep_two, np.ndarray):
rep_two = rep_two.to_array()
assert (rep_one.shape[1] == rep_two.shape[1])
distMat = generate_distance_matrix(rep_one, rep_two)
return regularDTW(distMat, norm=norm) | python | def dtw_distance(rep_one, rep_two, norm=True):
"""Computes the distance between two representations with the same
number of filters using Dynamic Time Warping.
Parameters
----------
rep_one : 2D array
First representation to compare. First dimension is time in frames
or samples and second dimension is the features.
rep_two : 2D array
Second representation to compare. First dimension is time in frames
or samples and second dimension is the features.
Returns
-------
float
Distance of dynamically time warping `rep_one` to `rep_two`.
"""
if not isinstance(rep_one, np.ndarray):
rep_one = rep_one.to_array()
if not isinstance(rep_two, np.ndarray):
rep_two = rep_two.to_array()
assert (rep_one.shape[1] == rep_two.shape[1])
distMat = generate_distance_matrix(rep_one, rep_two)
return regularDTW(distMat, norm=norm) | Computes the distance between two representations with the same
number of filters using Dynamic Time Warping.
Parameters
----------
rep_one : 2D array
First representation to compare. First dimension is time in frames
or samples and second dimension is the features.
rep_two : 2D array
Second representation to compare. First dimension is time in frames
or samples and second dimension is the features.
Returns
-------
float
Distance of dynamically time warping `rep_one` to `rep_two`. | https://github.com/mmcauliffe/Conch-sounds/blob/e05535fd08e4b0e47e37a77ef521d05eff1d6bc5/conch/distance/dtw.py#L16-L41 |
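A quick sketch with synthetic feature tracks: identical inputs align along the diagonal for a distance of zero, and a lightly perturbed copy stays close to zero.

import numpy as np

from conch.distance.dtw import dtw_distance

a = np.random.RandomState(3).randn(20, 5)             # 20 frames x 5 features
b = a + 0.01 * np.random.RandomState(4).randn(20, 5)  # slightly perturbed copy
print(dtw_distance(a, a))                             # 0.0
print(dtw_distance(a, b))                             # small positive value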
mmcauliffe/Conch-sounds | conch/distance/dtw.py | generate_distance_matrix | def generate_distance_matrix(source, target, weights=None):
"""Generates a local distance matrix for use in dynamic time warping.
Parameters
----------
source : 2D array
Source matrix with features in the second dimension.
target : 2D array
Target matrix with features in the second dimension.
Returns
-------
2D array
Local distance matrix.
"""
if weights is None:
weights = ones((source.shape[1], 1))
sLen = source.shape[0]
tLen = target.shape[0]
distMat = zeros((sLen, tLen))
for i in range(sLen):
for j in range(tLen):
distMat[i, j] = euclidean(source[i, :], target[j, :])
return distMat | python | def generate_distance_matrix(source, target, weights=None):
"""Generates a local distance matrix for use in dynamic time warping.
Parameters
----------
source : 2D array
Source matrix with features in the second dimension.
target : 2D array
Target matrix with features in the second dimension.
Returns
-------
2D array
Local distance matrix.
"""
if weights is None:
weights = ones((source.shape[1], 1))
sLen = source.shape[0]
tLen = target.shape[0]
distMat = zeros((sLen, tLen))
for i in range(sLen):
for j in range(tLen):
distMat[i, j] = euclidean(source[i, :], target[j, :])
return distMat | Generates a local distance matrix for use in dynamic time warping.
Parameters
----------
source : 2D array
Source matrix with features in the second dimension.
target : 2D array
Target matrix with features in the second dimension.
Returns
-------
2D array
Local distance matrix. | https://github.com/mmcauliffe/Conch-sounds/blob/e05535fd08e4b0e47e37a77ef521d05eff1d6bc5/conch/distance/dtw.py#L44-L68 |
mmcauliffe/Conch-sounds | conch/distance/dtw.py | regularDTW | def regularDTW(distMat, norm=True):
"""Use a local distance matrix to perform dynamic time warping.
Parameters
----------
distMat : 2D array
Local distance matrix.
Returns
-------
float
Total unweighted distance of the optimal path through the
local distance matrix.
"""
sLen, tLen = distMat.shape
totalDistance = zeros((sLen, tLen))
totalDistance[0:sLen, 0:tLen] = distMat
minDirection = zeros((sLen, tLen))
for i in range(1, sLen):
totalDistance[i, 0] = totalDistance[i, 0] + totalDistance[i - 1, 0]
for j in range(1, tLen):
totalDistance[0, j] = totalDistance[0, j] + totalDistance[0, j - 1]
for i in range(1, sLen):
for j in range(1, tLen):
# direction,minPrevDistance = min(enumerate([totalDistance[i,j],totalDistance[i,j+1],totalDistance[i+1,j]]), key=operator.itemgetter(1))
# totalDistance[i+1,j+1] = totalDistance[i+1,j+1] + minPrevDistance
# minDirection[i,j] = direction
minDirection[i, j], totalDistance[i, j] = min(
enumerate([totalDistance[i - 1, j - 1] + 2 * totalDistance[i, j],
totalDistance[i - 1, j] + totalDistance[i, j],
totalDistance[i, j - 1] + totalDistance[i, j]]), key=operator.itemgetter(1))
if norm:
return totalDistance[sLen - 1, tLen - 1] / (sLen + tLen)
return totalDistance[sLen - 1, tLen - 1] | python | def regularDTW(distMat, norm=True):
"""Use a local distance matrix to perform dynamic time warping.
Parameters
----------
distMat : 2D array
Local distance matrix.
Returns
-------
float
Total unweighted distance of the optimal path through the
local distance matrix.
"""
sLen, tLen = distMat.shape
totalDistance = zeros((sLen, tLen))
totalDistance[0:sLen, 0:tLen] = distMat
minDirection = zeros((sLen, tLen))
for i in range(1, sLen):
totalDistance[i, 0] = totalDistance[i, 0] + totalDistance[i - 1, 0]
for j in range(1, tLen):
totalDistance[0, j] = totalDistance[0, j] + totalDistance[0, j - 1]
for i in range(1, sLen):
for j in range(1, tLen):
# direction,minPrevDistance = min(enumerate([totalDistance[i,j],totalDistance[i,j+1],totalDistance[i+1,j]]), key=operator.itemgetter(1))
# totalDistance[i+1,j+1] = totalDistance[i+1,j+1] + minPrevDistance
# minDirection[i,j] = direction
minDirection[i, j], totalDistance[i, j] = min(
enumerate([totalDistance[i - 1, j - 1] + 2 * totalDistance[i, j],
totalDistance[i - 1, j] + totalDistance[i, j],
totalDistance[i, j - 1] + totalDistance[i, j]]), key=operator.itemgetter(1))
if norm:
return totalDistance[sLen - 1, tLen - 1] / (sLen + tLen)
return totalDistance[sLen - 1, tLen - 1] | Use a local distance matrix to perform dynamic time warping.
Parameters
----------
distMat : 2D array
Local distance matrix.
Returns
-------
float
Total unweighted distance of the optimal path through the
local distance matrix. | https://github.com/mmcauliffe/Conch-sounds/blob/e05535fd08e4b0e47e37a77ef521d05eff1d6bc5/conch/distance/dtw.py#L71-L109 |
mmcauliffe/Conch-sounds | conch/analysis/helper.py | preproc | def preproc(path, sr=16000, alpha=0.95):
"""Preprocess a .wav file for later processing. Currently assumes a
16-bit PCM input. Only returns left channel of stereo files.
Parameters
----------
path : str
Full path to .wav file to load.
sr : int, optional
Sampling rate to resample at, if specified.
alpha : float, optional
Alpha for preemphasis, defaults to 0.95.
Returns
-------
int
Sampling rate.
array
Processed PCM.
"""
oldsr, sig = wavfile.read(path)
try:
sig = sig[:, 0]
except IndexError:
pass
if False and sr != oldsr:
t = len(sig) / oldsr
numsamp = int(t * sr)
proc = resample(sig, numsamp)
else:
proc = sig
sr = oldsr
# proc = proc / 32768
if alpha is not None and alpha != 0:
proc = lfilter([1., -alpha], 1, proc)
return sr, proc | python | def preproc(path, sr=16000, alpha=0.95):
"""Preprocess a .wav file for later processing. Currently assumes a
16-bit PCM input. Only returns left channel of stereo files.
Parameters
----------
path : str
Full path to .wav file to load.
sr : int, optional
Sampling rate to resample at, if specified.
alpha : float, optional
Alpha for preemphasis, defaults to 0.95.
Returns
-------
int
Sampling rate.
array
Processed PCM.
"""
oldsr, sig = wavfile.read(path)
try:
sig = sig[:, 0]
except IndexError:
pass
if False and sr != oldsr:
t = len(sig) / oldsr
numsamp = int(t * sr)
proc = resample(sig, numsamp)
else:
proc = sig
sr = oldsr
# proc = proc / 32768
if alpha is not None and alpha != 0:
proc = lfilter([1., -alpha], 1, proc)
return sr, proc | Preprocess a .wav file for later processing. Currently assumes a
16-bit PCM input. Only returns left channel of stereo files.
Parameters
----------
path : str
Full path to .wav file to load.
sr : int, optional
Sampling rate to resample at, if specified.
alpha : float, optional
Alpha for preemphasis, defaults to 0.95.
Returns
-------
int
Sampling rate.
array
Processed PCM. | https://github.com/mmcauliffe/Conch-sounds/blob/e05535fd08e4b0e47e37a77ef521d05eff1d6bc5/conch/analysis/helper.py#L29-L67 |
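Two observations: the resampling branch above is disabled by the `if False` guard, so the sr argument currently has no effect, and the pre-emphasis step is the first-order FIR filter y[n] = x[n] - alpha * x[n-1]. A minimal sketch of that filter in isolation:

import numpy as np
from scipy.signal import lfilter

x = np.array([1.0, 2.0, 3.0, 4.0])
alpha = 0.95
y = lfilter([1.0, -alpha], 1, x)  # pre-emphasis
manual = np.concatenate(([x[0]], x[1:] - alpha * x[:-1]))
print(np.allclose(y, manual))     # True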
mmcauliffe/Conch-sounds | conch/analysis/helper.py | fftfilt | def fftfilt(b, x, *n):
"""Filter the signal x with the FIR filter described by the
coefficients in b using the overlap-add method. If the FFT
length n is not specified, it and the overlap-add block length
are selected so as to minimize the computational cost of
the filtering operation."""
N_x = len(x)
N_b = len(b)
# Determine the FFT length to use:
if len(n):
# Use the specified FFT length (rounded up to the nearest
# power of 2), provided that it is no less than the filter
# length:
n = n[0]
if n != int(n) or n <= 0:
raise ValueError('n must be a positive integer')
if n < N_b:
n = N_b
N_fft = 2 ** nextpow2(n)
else:
if N_x > N_b:
# When the filter length is smaller than the signal,
# choose the FFT length and block size that minimize the
# FLOPS cost. Since the cost for a length-N FFT is
# (N/2)*log2(N) and the filtering operation of each block
# involves 2 FFT operations and N multiplications, the
# cost of the overlap-add method for 1 length-N block is
# N*(1+log2(N)). For the sake of efficiency, only FFT
# lengths that are powers of 2 are considered:
N = 2 ** np.arange(np.ceil(np.log2(N_b)), np.floor(np.log2(N_x)))
cost = np.ceil(N_x / (N - N_b + 1)) * N * (np.log2(N) + 1)
if len(cost) > 0:
N_fft = N[np.argmin(cost)]
else:
N_fft = 2 ** nextpow2(N_b + N_x - 1)
else:
# When the filter length is at least as long as the signal,
# filter the signal using a single block:
N_fft = 2 ** nextpow2(N_b + N_x - 1)
N_fft = int(N_fft)
# Compute the block length:
L = int(N_fft - N_b + 1)
# Compute the transform of the filter:
H = fft(b, N_fft)
y = np.zeros(N_x, np.float32)
i = 0
while i <= N_x:
il = np.min([i + L, N_x])
k = np.min([i + N_fft, N_x])
yt = ifft(fft(x[i:il], N_fft) * H, N_fft) # Overlap..
y[i:k] = y[i:k] + yt[:k - i] # and add
i += L
return y | python | def fftfilt(b, x, *n):
"""Filter the signal x with the FIR filter described by the
coefficients in b using the overlap-add method. If the FFT
length n is not specified, it and the overlap-add block length
are selected so as to minimize the computational cost of
the filtering operation."""
N_x = len(x)
N_b = len(b)
# Determine the FFT length to use:
if len(n):
# Use the specified FFT length (rounded up to the nearest
# power of 2), provided that it is no less than the filter
# length:
n = n[0]
if n != int(n) or n <= 0:
raise ValueError('n must be a positive integer')
if n < N_b:
n = N_b
N_fft = 2 ** nextpow2(n)
else:
if N_x > N_b:
# When the filter length is smaller than the signal,
# choose the FFT length and block size that minimize the
# FLOPS cost. Since the cost for a length-N FFT is
# (N/2)*log2(N) and the filtering operation of each block
# involves 2 FFT operations and N multiplications, the
# cost of the overlap-add method for 1 length-N block is
# N*(1+log2(N)). For the sake of efficiency, only FFT
# lengths that are powers of 2 are considered:
N = 2 ** np.arange(np.ceil(np.log2(N_b)), np.floor(np.log2(N_x)))
cost = np.ceil(N_x / (N - N_b + 1)) * N * (np.log2(N) + 1)
if len(cost) > 0:
N_fft = N[np.argmin(cost)]
else:
N_fft = 2 ** nextpow2(N_b + N_x - 1)
else:
# When the filter length is at least as long as the signal,
# filter the signal using a single block:
N_fft = 2 ** nextpow2(N_b + N_x - 1)
N_fft = int(N_fft)
# Compute the block length:
L = int(N_fft - N_b + 1)
# Compute the transform of the filter:
H = fft(b, N_fft)
y = np.zeros(N_x, np.float32)
i = 0
while i <= N_x:
il = np.min([i + L, N_x])
k = np.min([i + N_fft, N_x])
yt = ifft(fft(x[i:il], N_fft) * H, N_fft) # Overlap..
y[i:k] = y[i:k] + yt[:k - i] # and add
i += L
return y | Filter the signal x with the FIR filter described by the
coefficients in b using the overlap-add method. If the FFT
length n is not specified, it and the overlap-add block length
are selected so as to minimize the computational cost of
the filtering operation. | https://github.com/mmcauliffe/Conch-sounds/blob/e05535fd08e4b0e47e37a77ef521d05eff1d6bc5/conch/analysis/helper.py#L121-L184 |
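A verification sketch: the overlap-add result should agree with direct FIR filtering via scipy.signal.lfilter, with the tolerance loosened for the float32 accumulator.

import numpy as np
from scipy.signal import lfilter

from conch.analysis.helper import fftfilt

rng = np.random.RandomState(5)
b = rng.randn(32)    # FIR taps
x = rng.randn(1000)
print(np.allclose(fftfilt(b, x), lfilter(b, [1.0], x), atol=1e-3))  # True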
mmcauliffe/Conch-sounds | conch/analysis/mfcc/rastamat.py | dct_spectrum | def dct_spectrum(spec):
"""Convert a spectrum into a cepstrum via type-III DCT (following HTK).
Parameters
----------
spec : array
Spectrum to perform a DCT on.
Returns
-------
array
Cepstrum of the input spectrum.
"""
ncep = spec.shape[0]
dctm = np.zeros((ncep, ncep))
for i in range(ncep):
dctm[i, :] = np.cos(i * np.arange(1, 2 * ncep, 2) / (2 * ncep) * np.pi) * np.sqrt(2 / ncep)
dctm *= 0.230258509299405
cep = np.dot(dctm, (10 * np.log10(spec + np.spacing(1))))
return cep | python | def dct_spectrum(spec):
"""Convert a spectrum into a cepstrum via type-III DCT (following HTK).
Parameters
----------
spec : array
Spectrum to perform a DCT on.
Returns
-------
array
Cepstrum of the input spectrum.
"""
ncep = spec.shape[0]
dctm = np.zeros((ncep, ncep))
for i in range(ncep):
dctm[i, :] = np.cos(i * np.arange(1, 2 * ncep, 2) / (2 * ncep) * np.pi) * np.sqrt(2 / ncep)
dctm *= 0.230258509299405
cep = np.dot(dctm, (10 * np.log10(spec + np.spacing(1))))
return cep | Convert a spectrum into a cepstrum via type-III DCT (following HTK).
Parameters
----------
spec : array
Spectrum to perform a DCT on.
Returns
-------
array
Cepstrum of the input spectrum. | https://github.com/mmcauliffe/Conch-sounds/blob/e05535fd08e4b0e47e37a77ef521d05eff1d6bc5/conch/analysis/mfcc/rastamat.py#L12-L32 |
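The hard-coded factor 0.230258509299405 is ln(10)/10, so multiplying the 10*log10 dB spectrum by it recovers the natural log; a one-line check:

import numpy as np

print(np.isclose(np.log(10) / 10, 0.230258509299405))                      # True
spec = np.array([1.0, 2.0, 4.0])
print(np.allclose(0.230258509299405 * 10 * np.log10(spec), np.log(spec)))  # True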
mmcauliffe/Conch-sounds | conch/analysis/mfcc/rastamat.py | construct_filterbank | def construct_filterbank(num_filters, nfft, sr, min_freq, max_freq):
"""Constructs a mel-frequency filter bank.
Parameters
----------
nfft : int
Number of points in the FFT.
Returns
-------
array
Filter bank to multiply an FFT spectrum to create a mel-frequency
spectrum.
"""
min_mel = freq_to_mel(min_freq)
max_mel = freq_to_mel(max_freq)
mel_points = np.linspace(min_mel, max_mel, num_filters + 2)
bin_freqs = mel_to_freq(mel_points)
# bins = round((nfft - 1) * bin_freqs / sr)
fftfreqs = np.arange(int(nfft / 2 + 1)) / nfft * sr
fbank = np.zeros((num_filters, int(nfft / 2 + 1)))
for i in range(num_filters):
fs = bin_freqs[i + np.arange(3)]
fs = fs[1] + (fs - fs[1])
loslope = (fftfreqs - fs[0]) / (fs[1] - fs[0])
highslope = (fs[2] - fftfreqs) / (fs[2] - fs[1])
fbank[i, :] = np.maximum(np.zeros(loslope.shape), np.minimum(loslope, highslope))
return fbank.transpose() | python | def construct_filterbank(num_filters, nfft, sr, min_freq, max_freq):
"""Constructs a mel-frequency filter bank.
Parameters
----------
nfft : int
Number of points in the FFT.
Returns
-------
array
Filter bank to multiply an FFT spectrum to create a mel-frequency
spectrum.
"""
min_mel = freq_to_mel(min_freq)
max_mel = freq_to_mel(max_freq)
mel_points = np.linspace(min_mel, max_mel, num_filters + 2)
bin_freqs = mel_to_freq(mel_points)
# bins = round((nfft - 1) * bin_freqs / sr)
fftfreqs = np.arange(int(nfft / 2 + 1)) / nfft * sr
fbank = np.zeros((num_filters, int(nfft / 2 + 1)))
for i in range(num_filters):
fs = bin_freqs[i + np.arange(3)]
fs = fs[1] + (fs - fs[1])
loslope = (fftfreqs - fs[0]) / (fs[1] - fs[0])
highslope = (fs[2] - fftfreqs) / (fs[2] - fs[1])
fbank[i, :] = np.maximum(np.zeros(loslope.shape), np.minimum(loslope, highslope))
return fbank.transpose() | Constructs a mel-frequency filter bank.
Parameters
----------
nfft : int
Number of points in the FFT.
Returns
-------
array
Filter bank to multiply an FFT spectrum to create a mel-frequency
spectrum. | https://github.com/mmcauliffe/Conch-sounds/blob/e05535fd08e4b0e47e37a77ef521d05eff1d6bc5/conch/analysis/mfcc/rastamat.py#L35-L66 |
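A usage sketch with arbitrary parameter values: build a 26-filter mel bank for a 512-point FFT at 16 kHz and project a power spectrum onto it.

import numpy as np

from conch.analysis.mfcc.rastamat import construct_filterbank

nfft, sr = 512, 16000
fbank = construct_filterbank(26, nfft, sr, 80, 7800)  # shape (257, 26): bins x filters
power_spec = np.abs(np.fft.rfft(np.random.RandomState(6).randn(nfft))) ** 2
mel_spec = np.dot(power_spec, fbank)                  # 26 mel-band energies
print(fbank.shape, mel_spec.shape)                    # (257, 26) (26,)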
frmdstryr/enamlx | enamlx/core/looper.py | ItemViewLooper._prefetch_items | def _prefetch_items(self,change):
""" When the current_row in the model changes (whether from scrolling) or
set by the application. Make sure the results are loaded!
"""
if self.is_initialized:
view = self.item_view
upper_limit = view.iterable_index+view.iterable_fetch_size-view.iterable_prefetch
lower_limit = max(0,view.iterable_index+view.iterable_prefetch)
offset = int(view.iterable_fetch_size/2.0)
upper_visible_row = view.visible_rect[2]
lower_visible_row = view.visible_rect[0]
print("Visible rect = %s"%view.visible_rect)
if upper_visible_row >= upper_limit:
next_index = max(0,upper_visible_row-offset) # Center on current row
# Going up works...
if next_index>view.iterable_index:
print("Auto prefetch upper limit %s!"%upper_limit)
view.iterable_index = next_index
#view.model().reset()
# But down doesn't?
elif view.iterable_index>0 and lower_visible_row < lower_limit:
next_index = max(0,lower_visible_row-offset) # Center on current row
# Going down works
if next_index<view.iterable_index:
print("Auto prefetch lower limit=%s, iterable=%s, setting next=%s!"%(lower_limit,view.iterable_index,next_index))
view.iterable_index = next_index | python | def _prefetch_items(self,change):
""" When the current_row in the model changes (whether from scrolling) or
set by the application. Make sure the results are loaded!
"""
if self.is_initialized:
view = self.item_view
upper_limit = view.iterable_index+view.iterable_fetch_size-view.iterable_prefetch
lower_limit = max(0,view.iterable_index+view.iterable_prefetch)
offset = int(view.iterable_fetch_size/2.0)
upper_visible_row = view.visible_rect[2]
lower_visible_row = view.visible_rect[0]
print("Visible rect = %s"%view.visible_rect)
if upper_visible_row >= upper_limit:
next_index = max(0,upper_visible_row-offset) # Center on current row
# Going up works...
if next_index>view.iterable_index:
print("Auto prefetch upper limit %s!"%upper_limit)
view.iterable_index = next_index
#view.model().reset()
# But down doesn't?
elif view.iterable_index>0 and lower_visible_row < lower_limit:
next_index = max(0,lower_visible_row-offset) # Center on current row
# Going down works
if next_index<view.iterable_index:
print("Auto prefetch lower limit=%s, iterable=%s, setting next=%s!"%(lower_limit,view.iterable_index,next_index))
view.iterable_index = next_index | When the current_row in the model changes (whether from scrolling or being
set by the application), make sure the results are loaded. | https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/enamlx/core/looper.py#L157-L186 |
frmdstryr/enamlx | enamlx/core/looper.py | ItemViewLooper.windowed_iterable | def windowed_iterable(self):
""" That returns only the window """
# Seek to offset
effective_offset = max(0,self.item_view.iterable_index)
for i,item in enumerate(self.iterable):
if i<effective_offset:
continue
elif i>=(effective_offset+self.item_view.iterable_fetch_size):
return
yield item | python | def windowed_iterable(self):
""" That returns only the window """
# Seek to offset
effective_offset = max(0,self.item_view.iterable_index)
for i,item in enumerate(self.iterable):
if i<effective_offset:
continue
elif i>=(effective_offset+self.item_view.iterable_fetch_size):
return
yield item | A generator that yields only the items inside the current fetch window. | https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/enamlx/core/looper.py#L190-L199 |
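The same windowing idea restated standalone with itertools.islice; the function and values are hypothetical, and the real property reads its offset and fetch size from the item view.

from itertools import islice

def windowed(iterable, index, fetch_size):  # hypothetical standalone version
    start = max(0, index)
    return islice(iterable, start, start + fetch_size)

print(list(windowed(range(100), index=40, fetch_size=5)))  # [40, 41, 42, 43, 44]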
frmdstryr/enamlx | enamlx/core/looper.py | ItemViewLooper.refresh_items | def refresh_items(self):
""" Refresh the items of the pattern.
This method destroys the old items and creates and initializes
the new items.
"""
old_items = self.items[:]# if self._dirty else []
old_iter_data = self._iter_data# if self._dirty else {}
iterable = self.windowed_iterable
pattern_nodes = self.pattern_nodes
new_iter_data = sortedmap()
new_items = []
if iterable is not None and len(pattern_nodes) > 0:
for loop_index, loop_item in enumerate(iterable):
iteration = old_iter_data.get(loop_item)
if iteration is not None:
new_iter_data[loop_item] = iteration
new_items.append(iteration)
old_items.remove(iteration)
continue
iteration = []
new_iter_data[loop_item] = iteration
new_items.append(iteration)
for nodes, key, f_locals in pattern_nodes:
with new_scope(key, f_locals) as f_locals:
f_locals['loop_index'] = loop_index
f_locals['loop_item'] = loop_item
for node in nodes:
child = node(None)
if isinstance(child, list):
iteration.extend(child)
else:
iteration.append(child)
# Add to old items list
#self.old_items.extend(old_items)
#if self._dirty:
for iteration in old_items:
for old in iteration:
if not old.is_destroyed:
old.destroy()
if len(new_items) > 0:
expanded = []
recursive_expand(sum(new_items, []), expanded)
self.parent.insert_children(self, expanded)
self.items = new_items# if self._dirty else new_items+old_items
self._iter_data = new_iter_data | python | def refresh_items(self):
""" Refresh the items of the pattern.
This method destroys the old items and creates and initializes
the new items.
"""
old_items = self.items[:]# if self._dirty else []
old_iter_data = self._iter_data# if self._dirty else {}
iterable = self.windowed_iterable
pattern_nodes = self.pattern_nodes
new_iter_data = sortedmap()
new_items = []
if iterable is not None and len(pattern_nodes) > 0:
for loop_index, loop_item in enumerate(iterable):
iteration = old_iter_data.get(loop_item)
if iteration is not None:
new_iter_data[loop_item] = iteration
new_items.append(iteration)
old_items.remove(iteration)
continue
iteration = []
new_iter_data[loop_item] = iteration
new_items.append(iteration)
for nodes, key, f_locals in pattern_nodes:
with new_scope(key, f_locals) as f_locals:
f_locals['loop_index'] = loop_index
f_locals['loop_item'] = loop_item
for node in nodes:
child = node(None)
if isinstance(child, list):
iteration.extend(child)
else:
iteration.append(child)
# Add to old items list
#self.old_items.extend(old_items)
#if self._dirty:
for iteration in old_items:
for old in iteration:
if not old.is_destroyed:
old.destroy()
if len(new_items) > 0:
expanded = []
recursive_expand(sum(new_items, []), expanded)
self.parent.insert_children(self, expanded)
self.items = new_items# if self._dirty else new_items+old_items
self._iter_data = new_iter_data | Refresh the items of the pattern.
This method destroys the old items and creates and initializes
the new items. | https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/enamlx/core/looper.py#L201-L252 |
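
The reuse logic in refresh_items reduces to keying previously built items by their loop value so a refresh only constructs what is new; a plain-dict sketch of that pattern (sortedmap swapped for dict purely for illustration):

def refresh(values, old_cache, build):
    """Rebuild items for `values`, reusing entries from `old_cache`
    ({value: item}) and constructing the rest with `build`."""
    new_cache, items, leftover = {}, [], dict(old_cache)
    for value in values:
        item = leftover.pop(value, None)
        if item is None:
            item = build(value)  # only genuinely new values are built
        new_cache[value] = item
        items.append(item)
    # anything still in `leftover` is stale and would be destroyed
    return items, new_cache, list(leftover.values())

items, cache, stale = refresh("abc", {"a": "ITEM-a", "z": "ITEM-z"},
                              lambda v: "ITEM-" + v)
assert items == ["ITEM-a", "ITEM-b", "ITEM-c"] and stale == ["ITEM-z"]
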
frmdstryr/enamlx | enamlx/qt/qt_double_spin_box.py | QtDoubleSpinBox.create_widget | def create_widget(self):
""" Create the underlying QDoubleSpinBox widget.
"""
widget = QDoubleSpinBox(self.parent_widget())
widget.setKeyboardTracking(False)
self.widget = widget | python | def create_widget(self):
""" Create the underlying QDoubleSpinBox widget.
"""
widget = QDoubleSpinBox(self.parent_widget())
widget.setKeyboardTracking(False)
self.widget = widget | Create the underlying QDoubleSpinBox widget. | https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/enamlx/qt/qt_double_spin_box.py#L24-L30 |
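
setKeyboardTracking(False) is stock Qt behavior: valueChanged is suppressed while the user is typing and only fires on Enter, focus loss, or arrow stepping. A sketch assuming PyQt5 as the binding:

from PyQt5.QtWidgets import QApplication, QDoubleSpinBox

app = QApplication([])
spin = QDoubleSpinBox()
spin.setKeyboardTracking(False)  # no valueChanged per keystroke
spin.valueChanged.connect(lambda v: print("committed value:", v))
spin.show()
app.exec_()
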
frmdstryr/enamlx | enamlx/core/block.py | Block.initialize | def initialize(self):
""" A reimplemented initializer.
This method will add the include objects to the parent of the
include and ensure that they are initialized.
"""
super(Block, self).initialize()
if self.block:
self.block.parent.insert_children(self.block, self.children) | python | def initialize(self):
""" A reimplemented initializer.
This method will add the include objects to the parent of the
include and ensure that they are initialized.
"""
super(Block, self).initialize()
if self.block:
self.block.parent.insert_children(self.block, self.children) | A reimplemented initializer.
This method will add the include objects to the parent of the
include and ensure that they are initialized. | https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/enamlx/core/block.py#L23-L32 |
frmdstryr/enamlx | enamlx/core/block.py | Block._observe_block | def _observe_block(self, change):
""" A change handler for the 'objects' list of the Include.
If the object is initialized objects which are removed will be
unparented and objects which are added will be reparented. Old
objects will be destroyed if the 'destroy_old' flag is True.
"""
if self.is_initialized:
if change['type'] == 'update':
old_block = change['oldvalue']
old_block.parent.remove_children(old_block,self.children)
new_block = change['value']
new_block.parent.insert_children(new_block, self.children) | python | def _observe_block(self, change):
""" A change handler for the 'objects' list of the Include.
If the object is initialized objects which are removed will be
unparented and objects which are added will be reparented. Old
objects will be destroyed if the 'destroy_old' flag is True.
"""
if self.is_initialized:
if change['type'] == 'update':
old_block = change['oldvalue']
old_block.parent.remove_children(old_block,self.children)
new_block = change['value']
            new_block.parent.insert_children(new_block, self.children) | A change handler for the 'block' reference.
If the object is initialized objects which are removed will be
unparented and objects which are added will be reparented. Old
objects will be destroyed if the 'destroy_old' flag is True. | https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/enamlx/core/block.py#L35-L48 |
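
Observers in Atom (the framework under Enaml) receive a change dict carrying 'type', 'oldvalue', and 'value', which is exactly what the handler above inspects; a minimal sketch:

from atom.api import Atom, Value, observe

class Holder(Atom):
    block = Value()

    @observe('block')
    def _on_block(self, change):
        # 'create' changes carry no 'oldvalue', so guard on 'update'
        if change['type'] == 'update':
            print('block went from', change['oldvalue'], 'to', change['value'])

h = Holder(block=1)
h.block = 2  # prints: block went from 1 to 2
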
frmdstryr/enamlx | enamlx/qt/qt_table_view.py | QtTableViewItem._update_index | def _update_index(self):
""" Update the reference to the index within the table """
d = self.declaration
self.index = self.view.model.index(d.row, d.column)
if self.delegate:
self._refresh_count += 1
timed_call(self._loading_interval, self._update_delegate) | python | def _update_index(self):
""" Update the reference to the index within the table """
d = self.declaration
self.index = self.view.model.index(d.row, d.column)
if self.delegate:
self._refresh_count += 1
timed_call(self._loading_interval, self._update_delegate) | Update the reference to the index within the table | https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/enamlx/qt/qt_table_view.py#L161-L167 |
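
The _refresh_count/timed_call pair is a debounce: every change schedules a callback, but only the last scheduled one (the call that brings the counter back to zero) does any work. A framework-free sketch using threading.Timer in place of enaml's timed_call:

import threading

class Debouncer:
    """Run `fn` only once the pokes stop arriving for `delay` seconds."""
    def __init__(self, fn, delay=0.1):
        self.fn, self.delay, self._pending = fn, delay, 0

    def poke(self):
        self._pending += 1
        threading.Timer(self.delay, self._fire).start()

    def _fire(self):
        self._pending -= 1
        if self._pending == 0:  # only the last scheduled call survives
            self.fn()

d = Debouncer(lambda: print("refreshed once"))
for _ in range(5):
    d.poke()  # five rapid updates -> a single refresh
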
frmdstryr/enamlx | enamlx/qt/qt_table_view.py | QtTableViewItem._update_delegate | def _update_delegate(self):
""" Update the delegate cell widget. This is deferred so it
does not get called until the user is done scrolling.
"""
self._refresh_count -= 1
if self._refresh_count != 0:
return
try:
delegate = self.delegate
if not self.is_visible():
return
# The table destroys when it goes out of view
# so we always have to make a new one
delegate.create_widget()
delegate.init_widget()
# Set the index widget
self.view.widget.setIndexWidget(self.index, delegate.widget)
except RuntimeError:
pass | python | def _update_delegate(self):
""" Update the delegate cell widget. This is deferred so it
does not get called until the user is done scrolling.
"""
self._refresh_count -= 1
if self._refresh_count != 0:
return
try:
delegate = self.delegate
if not self.is_visible():
return
# The table destroys when it goes out of view
# so we always have to make a new one
delegate.create_widget()
delegate.init_widget()
# Set the index widget
self.view.widget.setIndexWidget(self.index, delegate.widget)
except RuntimeError:
pass | Update the delegate cell widget. This is deferred so it
does not get called until the user is done scrolling. | https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/enamlx/qt/qt_table_view.py#L169-L188 |
frmdstryr/enamlx | enamlx/qt/qt_table_view.py | QtTableViewItem.data_changed | def data_changed(self, change):
""" Notify the model that data has changed in this cell! """
index = self.index
if index:
self.view.model.dataChanged.emit(index, index) | python | def data_changed(self, change):
""" Notify the model that data has changed in this cell! """
index = self.index
if index:
self.view.model.dataChanged.emit(index, index) | Notify the model that data has changed in this cell! | https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/enamlx/qt/qt_table_view.py#L194-L198 |
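
dataChanged is the standard QAbstractItemModel signal for telling attached views to repaint a range of cells; a minimal PyQt5 model sketch (the binding is an assumption):

from PyQt5.QtCore import QAbstractTableModel, QModelIndex, Qt

class Model(QAbstractTableModel):
    def __init__(self):
        super().__init__()
        self._rows = [[0] * 3 for _ in range(3)]

    def rowCount(self, parent=QModelIndex()):
        return len(self._rows)

    def columnCount(self, parent=QModelIndex()):
        return 3

    def data(self, index, role=Qt.DisplayRole):
        if role == Qt.DisplayRole:
            return self._rows[index.row()][index.column()]

    def set_cell(self, row, col, value):
        self._rows[row][col] = value
        ix = self.index(row, col)
        self.dataChanged.emit(ix, ix)  # attached views repaint this cell
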
frmdstryr/enamlx | enamlx/qt/qt_occ_viewer.py | QtBaseViewer.GetHandle | def GetHandle(self):
        ''' returns the identifier of the GUI widget.
It must be an integer
'''
        win_id = self.winId() # this returns either an int or voidptr
if "%s"%type(win_id) == "<type 'PyCObject'>": # PySide
### with PySide, self.winId() does not return an integer
if sys.platform == "win32":
## Be careful, this hack is py27 specific
## does not work with python31 or higher
## since the PyCObject api was changed
import ctypes
ctypes.pythonapi.PyCObject_AsVoidPtr.restype = ctypes.c_void_p
ctypes.pythonapi.PyCObject_AsVoidPtr.argtypes = [
ctypes.py_object]
win_id = ctypes.pythonapi.PyCObject_AsVoidPtr(win_id)
elif type(win_id) is not int: #PyQt4 or 5
## below integer cast may be required because self.winId() can
            ## return a sip.voidptr according to the PyQt version used
## as well as the python version
win_id = int(win_id)
return win_id | python | def GetHandle(self):
        ''' returns the identifier of the GUI widget.
It must be an integer
'''
        win_id = self.winId() # this returns either an int or voidptr
if "%s"%type(win_id) == "<type 'PyCObject'>": # PySide
### with PySide, self.winId() does not return an integer
if sys.platform == "win32":
## Be careful, this hack is py27 specific
## does not work with python31 or higher
## since the PyCObject api was changed
import ctypes
ctypes.pythonapi.PyCObject_AsVoidPtr.restype = ctypes.c_void_p
ctypes.pythonapi.PyCObject_AsVoidPtr.argtypes = [
ctypes.py_object]
win_id = ctypes.pythonapi.PyCObject_AsVoidPtr(win_id)
elif type(win_id) is not int: #PyQt4 or 5
## below integer cast may be required because self.winId() can
            ## return a sip.voidptr according to the PyQt version used
## as well as the python version
win_id = int(win_id)
        return win_id | returns the identifier of the GUI widget.
It must be an integer | https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/enamlx/qt/qt_occ_viewer.py#L160-L182 |
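
On PyQt5/PySide2 with Python 3 the PyCObject branch above is dead code; winId() returns an int or a sip.voidptr, and both cast cleanly. A modern equivalent, as a sketch:

def get_handle(widget):
    """Return the native window id of a Qt widget as an int."""
    return int(widget.winId())
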
frmdstryr/enamlx | examples/occ_viewer/occ/occ_shape.py | Topology._loop_topo | def _loop_topo(self, topologyType, topologicalEntity=None, topologyTypeToAvoid=None):
'''
this could be a faces generator for a python TopoShape class
that way you can just do:
for face in srf.faces:
processFace(face)
'''
topoTypes = {TopAbs_VERTEX: TopoDS_Vertex,
TopAbs_EDGE: TopoDS_Edge,
TopAbs_FACE: TopoDS_Face,
TopAbs_WIRE: TopoDS_Wire,
TopAbs_SHELL: TopoDS_Shell,
TopAbs_SOLID: TopoDS_Solid,
TopAbs_COMPOUND: TopoDS_Compound,
TopAbs_COMPSOLID: TopoDS_CompSolid}
assert topologyType in topoTypes.keys(), '%s not one of %s' % (topologyType, topoTypes.keys())
self.topExp = TopExp_Explorer()
# use self.myShape if nothing is specified
if topologicalEntity is None and topologyTypeToAvoid is None:
self.topExp.Init(self.myShape, topologyType)
elif topologicalEntity is None and topologyTypeToAvoid is not None:
self.topExp.Init(self.myShape, topologyType, topologyTypeToAvoid)
elif topologyTypeToAvoid is None:
self.topExp.Init(topologicalEntity, topologyType)
elif topologyTypeToAvoid:
self.topExp.Init(topologicalEntity,
topologyType,
topologyTypeToAvoid)
seq = []
hashes = [] # list that stores hashes to avoid redundancy
occ_seq = TopTools_ListOfShape()
while self.topExp.More():
current_item = self.topExp.Current()
current_item_hash = current_item.__hash__()
if not current_item_hash in hashes:
hashes.append(current_item_hash)
occ_seq.Append(current_item)
self.topExp.Next()
# Convert occ_seq to python list
occ_iterator = TopTools_ListIteratorOfListOfShape(occ_seq)
while occ_iterator.More():
topo_to_add = self.topoFactory[topologyType](occ_iterator.Value())
seq.append(topo_to_add)
occ_iterator.Next()
if self.ignore_orientation:
# filter out those entities that share the same TShape
# but do *not* share the same orientation
filter_orientation_seq = []
for i in seq:
_present = False
for j in filter_orientation_seq:
if i.IsSame(j):
_present = True
break
if _present is False:
filter_orientation_seq.append(i)
return filter_orientation_seq
else:
return iter(seq) | python | def _loop_topo(self, topologyType, topologicalEntity=None, topologyTypeToAvoid=None):
'''
this could be a faces generator for a python TopoShape class
that way you can just do:
for face in srf.faces:
processFace(face)
'''
topoTypes = {TopAbs_VERTEX: TopoDS_Vertex,
TopAbs_EDGE: TopoDS_Edge,
TopAbs_FACE: TopoDS_Face,
TopAbs_WIRE: TopoDS_Wire,
TopAbs_SHELL: TopoDS_Shell,
TopAbs_SOLID: TopoDS_Solid,
TopAbs_COMPOUND: TopoDS_Compound,
TopAbs_COMPSOLID: TopoDS_CompSolid}
assert topologyType in topoTypes.keys(), '%s not one of %s' % (topologyType, topoTypes.keys())
self.topExp = TopExp_Explorer()
# use self.myShape if nothing is specified
if topologicalEntity is None and topologyTypeToAvoid is None:
self.topExp.Init(self.myShape, topologyType)
elif topologicalEntity is None and topologyTypeToAvoid is not None:
self.topExp.Init(self.myShape, topologyType, topologyTypeToAvoid)
elif topologyTypeToAvoid is None:
self.topExp.Init(topologicalEntity, topologyType)
elif topologyTypeToAvoid:
self.topExp.Init(topologicalEntity,
topologyType,
topologyTypeToAvoid)
seq = []
hashes = [] # list that stores hashes to avoid redundancy
occ_seq = TopTools_ListOfShape()
while self.topExp.More():
current_item = self.topExp.Current()
current_item_hash = current_item.__hash__()
if not current_item_hash in hashes:
hashes.append(current_item_hash)
occ_seq.Append(current_item)
self.topExp.Next()
# Convert occ_seq to python list
occ_iterator = TopTools_ListIteratorOfListOfShape(occ_seq)
while occ_iterator.More():
topo_to_add = self.topoFactory[topologyType](occ_iterator.Value())
seq.append(topo_to_add)
occ_iterator.Next()
if self.ignore_orientation:
# filter out those entities that share the same TShape
# but do *not* share the same orientation
filter_orientation_seq = []
for i in seq:
_present = False
for j in filter_orientation_seq:
if i.IsSame(j):
_present = True
break
if _present is False:
filter_orientation_seq.append(i)
return filter_orientation_seq
else:
return iter(seq) | this could be a faces generator for a python TopoShape class
that way you can just do:
for face in srf.faces:
processFace(face) | https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/examples/occ_viewer/occ/occ_shape.py#L145-L207 |
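
Assuming the surrounding Topology wrapper takes the shape in its constructor (the signature is not shown in this excerpt), iterating the faces of a solid with pythonocc would look roughly like this (import paths follow the older, pre-OCC.Core release line used by this file):

from OCC.BRepPrimAPI import BRepPrimAPI_MakeBox
from OCC.TopAbs import TopAbs_FACE

box = BRepPrimAPI_MakeBox(10., 20., 30.).Shape()
topo = Topology(box)  # hypothetical ctor: wraps the shape shown above
for face in topo._loop_topo(TopAbs_FACE):  # a box yields 6 faces
    print(face)
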
frmdstryr/enamlx | examples/occ_viewer/occ/occ_shape.py | Topology._map_shapes_and_ancestors | def _map_shapes_and_ancestors(self, topoTypeA, topoTypeB, topologicalEntity):
'''
        iterate the topoTypeB ancestors of topologicalEntity via topexp_MapShapesAndAncestors
@param topoTypeA:
@param topoTypeB:
@param topologicalEntity:
'''
topo_set = set()
_map = TopTools_IndexedDataMapOfShapeListOfShape()
topexp_MapShapesAndAncestors(self.myShape, topoTypeA, topoTypeB, _map)
results = _map.FindFromKey(topologicalEntity)
if results.IsEmpty():
yield None
topology_iterator = TopTools_ListIteratorOfListOfShape(results)
while topology_iterator.More():
topo_entity = self.topoFactory[topoTypeB](topology_iterator.Value())
# return the entity if not in set
# to assure we're not returning entities several times
if not topo_entity in topo_set:
if self.ignore_orientation:
unique = True
for i in topo_set:
if i.IsSame(topo_entity):
unique = False
break
if unique:
yield topo_entity
else:
yield topo_entity
topo_set.add(topo_entity)
topology_iterator.Next() | python | def _map_shapes_and_ancestors(self, topoTypeA, topoTypeB, topologicalEntity):
'''
        iterate the topoTypeB ancestors of topologicalEntity via topexp_MapShapesAndAncestors
@param topoTypeA:
@param topoTypeB:
@param topologicalEntity:
'''
topo_set = set()
_map = TopTools_IndexedDataMapOfShapeListOfShape()
topexp_MapShapesAndAncestors(self.myShape, topoTypeA, topoTypeB, _map)
results = _map.FindFromKey(topologicalEntity)
if results.IsEmpty():
yield None
topology_iterator = TopTools_ListIteratorOfListOfShape(results)
while topology_iterator.More():
topo_entity = self.topoFactory[topoTypeB](topology_iterator.Value())
# return the entity if not in set
# to assure we're not returning entities several times
if not topo_entity in topo_set:
if self.ignore_orientation:
unique = True
for i in topo_set:
if i.IsSame(topo_entity):
unique = False
break
if unique:
yield topo_entity
else:
yield topo_entity
topo_set.add(topo_entity)
        topology_iterator.Next() | iterate the topoTypeB ancestors of topologicalEntity via topexp_MapShapesAndAncestors
@param topoTypeA:
@param topoTypeB:
@param topologicalEntity: | https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/examples/occ_viewer/occ/occ_shape.py#L307-L341 |
frmdstryr/enamlx | examples/occ_viewer/occ/occ_shape.py | Topology._number_shapes_ancestors | def _number_shapes_ancestors(self, topoTypeA, topoTypeB, topologicalEntity):
'''returns the number of shape ancestors
        If you want to know how many edges a face has:
        _number_shapes_ancestors(self, TopAbs_EDGE, TopAbs_FACE, edg)
        will return the number of edges a face has
@param topoTypeA:
@param topoTypeB:
@param topologicalEntity:
'''
topo_set = set()
_map = TopTools_IndexedDataMapOfShapeListOfShape()
topexp_MapShapesAndAncestors(self.myShape, topoTypeA, topoTypeB, _map)
results = _map.FindFromKey(topologicalEntity)
if results.IsEmpty():
return None
topology_iterator = TopTools_ListIteratorOfListOfShape(results)
while topology_iterator.More():
topo_set.add(topology_iterator.Value())
topology_iterator.Next()
return len(topo_set) | python | def _number_shapes_ancestors(self, topoTypeA, topoTypeB, topologicalEntity):
'''returns the number of shape ancestors
        If you want to know how many edges a face has:
        _number_shapes_ancestors(self, TopAbs_EDGE, TopAbs_FACE, edg)
        will return the number of edges a face has
@param topoTypeA:
@param topoTypeB:
@param topologicalEntity:
'''
topo_set = set()
_map = TopTools_IndexedDataMapOfShapeListOfShape()
topexp_MapShapesAndAncestors(self.myShape, topoTypeA, topoTypeB, _map)
results = _map.FindFromKey(topologicalEntity)
if results.IsEmpty():
return None
topology_iterator = TopTools_ListIteratorOfListOfShape(results)
while topology_iterator.More():
topo_set.add(topology_iterator.Value())
topology_iterator.Next()
return len(topo_set) | returns the number of shape ancestors
        If you want to know how many edges a face has:
        _number_shapes_ancestors(self, TopAbs_EDGE, TopAbs_FACE, edg)
        will return the number of edges a face has
@param topoTypeA:
@param topoTypeB:
@param topologicalEntity: | https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/examples/occ_viewer/occ/occ_shape.py#L343-L362 |
frmdstryr/enamlx | examples/occ_viewer/occ/occ_shape.py | OccDependentShape.init_layout | def init_layout(self):
""" Initialize the layout of the toolkit shape.
This method is called during the bottom-up pass. This method
should initialize the layout of the widget. The child widgets
        will be fully initialized and laid out when this is called.
"""
for child in self.children():
self.child_added(child)
self.update_shape({}) | python | def init_layout(self):
""" Initialize the layout of the toolkit shape.
This method is called during the bottom-up pass. This method
should initialize the layout of the widget. The child widgets
        will be fully initialized and laid out when this is called.
"""
for child in self.children():
self.child_added(child)
self.update_shape({}) | Initialize the layout of the toolkit shape.
This method is called during the bottom-up pass. This method
should initialize the layout of the widget. The child widgets
        will be fully initialized and laid out when this is called. | https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/examples/occ_viewer/occ/occ_shape.py#L580-L590 |
frmdstryr/enamlx | enamlx/qt/qt_key_event.py | QtKeyEvent.init_widget | def init_widget(self):
""" The KeyEvent uses the parent_widget as it's widget """
super(QtKeyEvent, self).init_widget()
d = self.declaration
widget = self.widget
self._keyPressEvent = widget.keyPressEvent
self._keyReleaseEvent = widget.keyReleaseEvent
self.set_enabled(d.enabled)
self.set_keys(d.keys) | python | def init_widget(self):
""" The KeyEvent uses the parent_widget as it's widget """
super(QtKeyEvent, self).init_widget()
d = self.declaration
widget = self.widget
self._keyPressEvent = widget.keyPressEvent
self._keyReleaseEvent = widget.keyReleaseEvent
self.set_enabled(d.enabled)
        self.set_keys(d.keys) | The KeyEvent uses the parent_widget as its widget | https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/enamlx/qt/qt_key_event.py#L47-L55 |
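
Saving a widget's bound keyPressEvent and replacing it, as init_widget does above, is a common Qt monkey-patch for intercepting keys without subclassing; a minimal sketch assuming PyQt5:

from PyQt5.QtWidgets import QApplication, QLineEdit

app = QApplication([])
line = QLineEdit()
_orig_key_press = line.keyPressEvent  # keep the bound original

def patched(event):
    print("key pressed:", event.key())
    _orig_key_press(event)  # then let the widget handle it normally

line.keyPressEvent = patched
line.show()
app.exec_()
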