repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition |
---|---|---|---|---|---|---|---|---|---|---|---|
coderholic/pyradio | pyradio/config.py | PyRadioStations._playlist_format_changed | def _playlist_format_changed(self):
""" Check if we have new or old format
and report if format has changed
Format type can change by editing encoding,
deleting a non-utf-8 station etc.
"""
new_format = False
for n in self.stations:
if n[2] != '':
new_format = True
break
if self.new_format == new_format:
return False
else:
return True | python | def _playlist_format_changed(self):
""" Check if we have new or old format
and report if format has changed
Format type can change by editing encoding,
deleting a non-utf-8 station etc.
"""
new_format = False
for n in self.stations:
if n[2] != '':
new_format = True
break
if self.new_format == new_format:
return False
else:
return True | [
"def",
"_playlist_format_changed",
"(",
"self",
")",
":",
"new_format",
"=",
"False",
"for",
"n",
"in",
"self",
".",
"stations",
":",
"if",
"n",
"[",
"2",
"]",
"!=",
"''",
":",
"new_format",
"=",
"True",
"break",
"if",
"self",
".",
"new_format",
"==",
"new_format",
":",
"return",
"False",
"else",
":",
"return",
"True"
] | Check if we have new or old format
and report if format has changed
Format type can change by editing encoding,
deleting a non-utf-8 station etc. | [
"Check",
"if",
"we",
"have",
"new",
"or",
"old",
"format",
"and",
"report",
"if",
"format",
"has",
"changed"
] | c5219d350bccbccd49dbd627c1f886a952ea1963 | https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/config.py#L241-L256 | train |
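
The `_playlist_format_changed` entry above reports a format switch by checking whether any station row carries a non-empty third (encoding) field, then comparing the result against the stored `new_format` flag. Below is a minimal, self-contained sketch of that check; the station names and URLs are made up for illustration and are not part of the playlist data above.

```python
def playlist_uses_new_format(stations):
    """True if any station row has a non-empty encoding field (index 2)."""
    return any(len(row) > 2 and row[2] != '' for row in stations)

old_style = [['Station A', 'http://example.com/a', ''],
             ['Station B', 'http://example.com/b', '']]
new_style = [['Station A', 'http://example.com/a', ''],
             ['Station B', 'http://example.com/b', 'iso-8859-1']]

print(playlist_uses_new_format(old_style))   # False
print(playlist_uses_new_format(new_style))   # True
```

A "format change" is then simply `playlist_uses_new_format(stations) != stored_flag`, which is what the method above reports.
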
coderholic/pyradio | pyradio/config.py | PyRadioStations.save_playlist_file | def save_playlist_file(self, stationFile=''):
""" Save a playlist
Create a txt file and write stations in it.
Then rename it to final target
return 0: All ok
-1: Error writing file
-2: Error renaming file
"""
if self._playlist_format_changed():
self.dirty_playlist = True
self.new_format = not self.new_format
if stationFile:
st_file = stationFile
else:
st_file = self.stations_file
if not self.dirty_playlist:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Playlist not modified...')
return 0
st_new_file = st_file.replace('.csv', '.txt')
tmp_stations = self.stations[:]
tmp_stations.reverse()
if self.new_format:
tmp_stations.append([ '# Find lots more stations at http://www.iheart.com' , '', '' ])
else:
tmp_stations.append([ '# Find lots more stations at http://www.iheart.com' , '' ])
tmp_stations.reverse()
try:
with open(st_new_file, 'w') as cfgfile:
writter = csv.writer(cfgfile)
for a_station in tmp_stations:
writter.writerow(self._format_playlist_row(a_station))
except:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Cannot open playlist file for writing,,,')
return -1
try:
move(st_new_file, st_file)
except:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Cannot rename playlist file...')
return -2
self.dirty_playlist = False
return 0 | python | def save_playlist_file(self, stationFile=''):
""" Save a playlist
Create a txt file and write stations in it.
Then rename it to final target
return 0: All ok
-1: Error writing file
-2: Error renaming file
"""
if self._playlist_format_changed():
self.dirty_playlist = True
self.new_format = not self.new_format
if stationFile:
st_file = stationFile
else:
st_file = self.stations_file
if not self.dirty_playlist:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Playlist not modified...')
return 0
st_new_file = st_file.replace('.csv', '.txt')
tmp_stations = self.stations[:]
tmp_stations.reverse()
if self.new_format:
tmp_stations.append([ '# Find lots more stations at http://www.iheart.com' , '', '' ])
else:
tmp_stations.append([ '# Find lots more stations at http://www.iheart.com' , '' ])
tmp_stations.reverse()
try:
with open(st_new_file, 'w') as cfgfile:
writter = csv.writer(cfgfile)
for a_station in tmp_stations:
writter.writerow(self._format_playlist_row(a_station))
except:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Cannot open playlist file for writing,,,')
return -1
try:
move(st_new_file, st_file)
except:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Cannot rename playlist file...')
return -2
self.dirty_playlist = False
return 0 | [
"def",
"save_playlist_file",
"(",
"self",
",",
"stationFile",
"=",
"''",
")",
":",
"if",
"self",
".",
"_playlist_format_changed",
"(",
")",
":",
"self",
".",
"dirty_playlist",
"=",
"True",
"self",
".",
"new_format",
"=",
"not",
"self",
".",
"new_format",
"if",
"stationFile",
":",
"st_file",
"=",
"stationFile",
"else",
":",
"st_file",
"=",
"self",
".",
"stations_file",
"if",
"not",
"self",
".",
"dirty_playlist",
":",
"if",
"logger",
".",
"isEnabledFor",
"(",
"logging",
".",
"DEBUG",
")",
":",
"logger",
".",
"debug",
"(",
"'Playlist not modified...'",
")",
"return",
"0",
"st_new_file",
"=",
"st_file",
".",
"replace",
"(",
"'.csv'",
",",
"'.txt'",
")",
"tmp_stations",
"=",
"self",
".",
"stations",
"[",
":",
"]",
"tmp_stations",
".",
"reverse",
"(",
")",
"if",
"self",
".",
"new_format",
":",
"tmp_stations",
".",
"append",
"(",
"[",
"'# Find lots more stations at http://www.iheart.com'",
",",
"''",
",",
"''",
"]",
")",
"else",
":",
"tmp_stations",
".",
"append",
"(",
"[",
"'# Find lots more stations at http://www.iheart.com'",
",",
"''",
"]",
")",
"tmp_stations",
".",
"reverse",
"(",
")",
"try",
":",
"with",
"open",
"(",
"st_new_file",
",",
"'w'",
")",
"as",
"cfgfile",
":",
"writter",
"=",
"csv",
".",
"writer",
"(",
"cfgfile",
")",
"for",
"a_station",
"in",
"tmp_stations",
":",
"writter",
".",
"writerow",
"(",
"self",
".",
"_format_playlist_row",
"(",
"a_station",
")",
")",
"except",
":",
"if",
"logger",
".",
"isEnabledFor",
"(",
"logging",
".",
"DEBUG",
")",
":",
"logger",
".",
"debug",
"(",
"'Cannot open playlist file for writing,,,'",
")",
"return",
"-",
"1",
"try",
":",
"move",
"(",
"st_new_file",
",",
"st_file",
")",
"except",
":",
"if",
"logger",
".",
"isEnabledFor",
"(",
"logging",
".",
"DEBUG",
")",
":",
"logger",
".",
"debug",
"(",
"'Cannot rename playlist file...'",
")",
"return",
"-",
"2",
"self",
".",
"dirty_playlist",
"=",
"False",
"return",
"0"
] | Save a playlist
Create a txt file and write stations in it.
Then rename it to final target
return 0: All ok
-1: Error writing file
-2: Error renaming file | [
"Save",
"a",
"playlist",
"Create",
"a",
"txt",
"file",
"and",
"write",
"stations",
"in",
"it",
".",
"Then",
"rename",
"it",
"to",
"final",
"target"
] | c5219d350bccbccd49dbd627c1f886a952ea1963 | https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/config.py#L258-L306 | train |
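
`save_playlist_file` above follows a write-then-rename pattern: rows are written to a sibling `.txt` file first and only moved over the `.csv` target if writing succeeded, with `-1`/`-2` signalling the two failure points. Here is a reduced sketch of that pattern under the same return-code convention; the function name and the narrowed exception types are illustrative assumptions, not the original code.

```python
import csv
from shutil import move

def save_rows(target_csv, rows):
    """Write rows to a temporary sibling file, then move it over the target."""
    tmp_file = target_csv.replace('.csv', '.txt')
    try:
        with open(tmp_file, 'w') as out:
            writer = csv.writer(out)
            for row in rows:
                writer.writerow(row)
    except (IOError, OSError):
        return -1   # error writing file
    try:
        move(tmp_file, target_csv)
    except OSError:
        return -2   # error renaming file
    return 0        # all ok
```
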
coderholic/pyradio | pyradio/config.py | PyRadioStations._bytes_to_human | def _bytes_to_human(self, B):
''' Return the given bytes as a human friendly KB, MB, GB, or TB string '''
KB = float(1024)
MB = float(KB ** 2) # 1,048,576
GB = float(KB ** 3) # 1,073,741,824
TB = float(KB ** 4) # 1,099,511,627,776
if B < KB:
return '{0} B'.format(B)
B = float(B)
if KB <= B < MB:
return '{0:.2f} KB'.format(B/KB)
elif MB <= B < GB:
return '{0:.2f} MB'.format(B/MB)
elif GB <= B < TB:
return '{0:.2f} GB'.format(B/GB)
elif TB <= B:
return '{0:.2f} TB'.format(B/TB) | python | def _bytes_to_human(self, B):
''' Return the given bytes as a human friendly KB, MB, GB, or TB string '''
KB = float(1024)
MB = float(KB ** 2) # 1,048,576
GB = float(KB ** 3) # 1,073,741,824
TB = float(KB ** 4) # 1,099,511,627,776
if B < KB:
return '{0} B'.format(B)
B = float(B)
if KB <= B < MB:
return '{0:.2f} KB'.format(B/KB)
elif MB <= B < GB:
return '{0:.2f} MB'.format(B/MB)
elif GB <= B < TB:
return '{0:.2f} GB'.format(B/GB)
elif TB <= B:
return '{0:.2f} TB'.format(B/TB) | [
"def",
"_bytes_to_human",
"(",
"self",
",",
"B",
")",
":",
"KB",
"=",
"float",
"(",
"1024",
")",
"MB",
"=",
"float",
"(",
"KB",
"**",
"2",
")",
"# 1,048,576",
"GB",
"=",
"float",
"(",
"KB",
"**",
"3",
")",
"# 1,073,741,824",
"TB",
"=",
"float",
"(",
"KB",
"**",
"4",
")",
"# 1,099,511,627,776",
"if",
"B",
"<",
"KB",
":",
"return",
"'{0} B'",
".",
"format",
"(",
"B",
")",
"B",
"=",
"float",
"(",
"B",
")",
"if",
"KB",
"<=",
"B",
"<",
"MB",
":",
"return",
"'{0:.2f} KB'",
".",
"format",
"(",
"B",
"/",
"KB",
")",
"elif",
"MB",
"<=",
"B",
"<",
"GB",
":",
"return",
"'{0:.2f} MB'",
".",
"format",
"(",
"B",
"/",
"MB",
")",
"elif",
"GB",
"<=",
"B",
"<",
"TB",
":",
"return",
"'{0:.2f} GB'",
".",
"format",
"(",
"B",
"/",
"GB",
")",
"elif",
"TB",
"<=",
"B",
":",
"return",
"'{0:.2f} TB'",
".",
"format",
"(",
"B",
"/",
"TB",
")"
] | Return the given bytes as a human friendly KB, MB, GB, or TB string | [
"Return",
"the",
"given",
"bytes",
"as",
"a",
"human",
"friendly",
"KB",
"MB",
"GB",
"or",
"TB",
"string"
] | c5219d350bccbccd49dbd627c1f886a952ea1963 | https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/config.py#L321-L338 | train |
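
`_bytes_to_human` buckets a byte count by powers of 1024 and formats the result to two decimals. A trimmed, runnable version with a few worked values (the TB branch is omitted here for brevity):

```python
KB = 1024.0
MB = KB ** 2   # 1,048,576
GB = KB ** 3   # 1,073,741,824

def bytes_to_human(n):
    if n < KB:
        return '{0} B'.format(n)
    if n < MB:
        return '{0:.2f} KB'.format(n / KB)
    if n < GB:
        return '{0:.2f} MB'.format(n / MB)
    return '{0:.2f} GB'.format(n / GB)

print(bytes_to_human(512))        # 512 B
print(bytes_to_human(1536))       # 1.50 KB
print(bytes_to_human(10485760))   # 10.00 MB
```
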
coderholic/pyradio | pyradio/config.py | PyRadioStations.append_station | def append_station(self, params, stationFile=''):
""" Append a station to csv file
return 0: All ok
-2 - playlist not found
-3 - negative number specified
-4 - number not found
-5: Error writing file
-6: Error renaming file
"""
if self.new_format:
if stationFile:
st_file = stationFile
else:
st_file = self.stations_file
st_file, ret = self._get_playlist_abspath_from_data(st_file)
if ret < -1:
return ret
try:
with open(st_file, 'a') as cfgfile:
writter = csv.writer(cfgfile)
writter.writerow(params)
return 0
except:
return -5
else:
self.stations.append([ params[0], params[1], params[2] ])
self.dirty_playlist = True
st_file, ret = self._get_playlist_abspath_from_data(stationFile)
if ret < -1:
return ret
ret = self.save_playlist_file(st_file)
if ret < 0:
ret -= 4
return ret | python | def append_station(self, params, stationFile=''):
""" Append a station to csv file
return 0: All ok
-2 - playlist not found
-3 - negative number specified
-4 - number not found
-5: Error writing file
-6: Error renaming file
"""
if self.new_format:
if stationFile:
st_file = stationFile
else:
st_file = self.stations_file
st_file, ret = self._get_playlist_abspath_from_data(st_file)
if ret < -1:
return ret
try:
with open(st_file, 'a') as cfgfile:
writter = csv.writer(cfgfile)
writter.writerow(params)
return 0
except:
return -5
else:
self.stations.append([ params[0], params[1], params[2] ])
self.dirty_playlist = True
st_file, ret = self._get_playlist_abspath_from_data(stationFile)
if ret < -1:
return ret
ret = self.save_playlist_file(st_file)
if ret < 0:
ret -= 4
return ret | [
"def",
"append_station",
"(",
"self",
",",
"params",
",",
"stationFile",
"=",
"''",
")",
":",
"if",
"self",
".",
"new_format",
":",
"if",
"stationFile",
":",
"st_file",
"=",
"stationFile",
"else",
":",
"st_file",
"=",
"self",
".",
"stations_file",
"st_file",
",",
"ret",
"=",
"self",
".",
"_get_playlist_abspath_from_data",
"(",
"st_file",
")",
"if",
"ret",
"<",
"-",
"1",
":",
"return",
"ret",
"try",
":",
"with",
"open",
"(",
"st_file",
",",
"'a'",
")",
"as",
"cfgfile",
":",
"writter",
"=",
"csv",
".",
"writer",
"(",
"cfgfile",
")",
"writter",
".",
"writerow",
"(",
"params",
")",
"return",
"0",
"except",
":",
"return",
"-",
"5",
"else",
":",
"self",
".",
"stations",
".",
"append",
"(",
"[",
"params",
"[",
"0",
"]",
",",
"params",
"[",
"1",
"]",
",",
"params",
"[",
"2",
"]",
"]",
")",
"self",
".",
"dirty_playlist",
"=",
"True",
"st_file",
",",
"ret",
"=",
"self",
".",
"_get_playlist_abspath_from_data",
"(",
"stationFile",
")",
"if",
"ret",
"<",
"-",
"1",
":",
"return",
"ret",
"ret",
"=",
"self",
".",
"save_playlist_file",
"(",
"st_file",
")",
"if",
"ret",
"<",
"0",
":",
"ret",
"-=",
"4",
"return",
"ret"
] | Append a station to csv file
return 0: All ok
-2 - playlist not found
-3 - negative number specified
-4 - number not found
-5: Error writing file
-6: Error renaming file | [
"Append",
"a",
"station",
"to",
"csv",
"file"
] | c5219d350bccbccd49dbd627c1f886a952ea1963 | https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/config.py#L340-L375 | train |
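
In the new-format branch, `append_station` simply appends one CSV row to the playlist file in append mode. A minimal sketch of that branch; the helper name, the example path and the station values are placeholders:

```python
import csv

def append_row(csv_path, row):
    """Append one station row; 0 on success, -5 on a write error."""
    try:
        with open(csv_path, 'a') as out:
            csv.writer(out).writerow(row)
        return 0
    except (IOError, OSError):
        return -5

# Illustrative call (path and values are not from the dataset above):
# append_row('stations.csv', ['My Station', 'http://example.com/stream', 'utf-8'])
```
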
coderholic/pyradio | pyradio/config.py | PyRadioConfig._check_config_file | def _check_config_file(self, usr):
''' Make sure a config file exists in the config dir '''
package_config_file = path.join(path.dirname(__file__), 'config')
user_config_file = path.join(usr, 'config')
''' restore config from bck file '''
if path.exists(user_config_file + '.restore'):
try:
copyfile(user_config_file + '.restore', user_config_file)
remove(self.user_config_file + '.restore')
except:
pass
''' Copy package config into user dir '''
if not path.exists(user_config_file):
copyfile(package_config_file, user_config_file) | python | def _check_config_file(self, usr):
''' Make sure a config file exists in the config dir '''
package_config_file = path.join(path.dirname(__file__), 'config')
user_config_file = path.join(usr, 'config')
''' restore config from bck file '''
if path.exists(user_config_file + '.restore'):
try:
copyfile(user_config_file + '.restore', user_config_file)
remove(self.user_config_file + '.restore')
except:
pass
''' Copy package config into user dir '''
if not path.exists(user_config_file):
copyfile(package_config_file, user_config_file) | [
"def",
"_check_config_file",
"(",
"self",
",",
"usr",
")",
":",
"package_config_file",
"=",
"path",
".",
"join",
"(",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'config'",
")",
"user_config_file",
"=",
"path",
".",
"join",
"(",
"usr",
",",
"'config'",
")",
"''' restore config from bck file '''",
"if",
"path",
".",
"exists",
"(",
"user_config_file",
"+",
"'.restore'",
")",
":",
"try",
":",
"copyfile",
"(",
"user_config_file",
"+",
"'.restore'",
",",
"user_config_file",
")",
"remove",
"(",
"self",
".",
"user_config_file",
"+",
"'.restore'",
")",
"except",
":",
"pass",
"''' Copy package config into user dir '''",
"if",
"not",
"path",
".",
"exists",
"(",
"user_config_file",
")",
":",
"copyfile",
"(",
"package_config_file",
",",
"user_config_file",
")"
] | Make sure a config file exists in the config dir | [
"Make",
"sure",
"a",
"config",
"file",
"exists",
"in",
"the",
"config",
"dir"
] | c5219d350bccbccd49dbd627c1f886a952ea1963 | https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/config.py#L566-L581 | train |
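
`_check_config_file` bootstraps the user configuration in two moves: recover from a leftover `config.restore` backup if one exists, then seed the config from the packaged default when the user has none. A standalone sketch of the same sequence; the function name and directory arguments are assumptions for the example:

```python
from os import path, remove
from shutil import copyfile

def ensure_config(package_dir, user_dir):
    package_cfg = path.join(package_dir, 'config')
    user_cfg = path.join(user_dir, 'config')
    backup = user_cfg + '.restore'
    if path.exists(backup):
        try:
            copyfile(backup, user_cfg)   # recover an interrupted save
            remove(backup)
        except OSError:
            pass
    if not path.exists(user_cfg):
        copyfile(package_cfg, user_cfg)  # first run: copy the packaged default
```
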
coderholic/pyradio | pyradio/config.py | PyRadioConfig.save_config | def save_config(self):
""" Save config file
Creates config.restore (back up file)
Returns:
-1: Error saving config
0: Config saved successfully
1: Config not saved (not modified"""
if not self.opts['dirty_config'][1]:
if logger.isEnabledFor(logging.INFO):
logger.info('Config not saved (not modified)')
return 1
txt ='''# PyRadio Configuration File
# Player selection
# This is the equivalent to the -u , --use-player command line parameter
# Specify the player to use with PyRadio, or the player detection order
# Example:
# player = vlc
# or
# player = vlc,mpv, mplayer
# Default value: mpv,mplayer,vlc
player = {0}
# Default playlist
# This is the playlist to open if none is specified
# You can scecify full path to CSV file, or if the playlist is in the
# config directory, playlist name (filename without extension) or
# playlist number (as reported by -ls command line option)
# Default value: stations
default_playlist = {1}
# Default station
# This is the equivalent to the -p , --play command line parameter
# The station number within the default playlist to play
# Value is 1..number of stations, "-1" or "False" means no auto play
# "0" or "Random" means play a random station
# Default value: False
default_station = {2}
# Default encoding
# This is the encoding used by default when reading data provided by
# a station (such as song title, etc.) If reading said data ends up
# in an error, 'utf-8' will be used instead.
#
# A valid encoding list can be found at:
# https://docs.python.org/2.7/library/codecs.html#standard-encodings
# replacing 2.7 with specific version:
# 3.0 up to current python version.
#
# Default value: utf-8
default_encoding = {3}
# Connection timeout
# PyRadio will wait for this number of seconds to get a station/server
# message indicating that playback has actually started.
# If this does not happen (within this number of seconds after the
# connection is initiated), PyRadio will consider the station
# unreachable, and display the "Failed to connect to: [station]"
# message.
#
# Valid values: 5 - 60
# Default value: 10
connection_timeout = {4}
# Default theme
# Hardcooded themes:
# dark (default) (8 colors)
# light (8 colors)
# dark_16_colors (16 colors dark theme alternative)
# light_16_colors (16 colors light theme alternative)
# black_on_white (bow) (256 colors)
# white_on_black (wob) (256 colors)
# Default value = 'dark'
theme = {5}
# Transparency setting
# If False, theme colors will be used.
# If True and a compositor is running, the stations' window
# background will be transparent. If True and a compositor is
# not running, the terminal's background color will be used.
# Valid values: True, true, False, false
# Default value: False
use_transparency = {6}
# Playlist management
#
# Specify whether you will be asked to confirm
# every station deletion action
# Valid values: True, true, False, false
# Default value: True
confirm_station_deletion = {7}
# Specify whether you will be asked to confirm
# playlist reloading, when the playlist has not
# been modified within Pyradio
# Valid values: True, true, False, false
# Default value: True
confirm_playlist_reload = {8}
# Specify whether you will be asked to save a
# modified playlist whenever it needs saving
# Valid values: True, true, False, false
# Default value: False
auto_save_playlist = {9}
'''
copyfile(self.config_file, self.config_file + '.restore')
if self.opts['default_station'][1] is None:
self.opts['default_station'][1] = '-1'
try:
with open(self.config_file, 'w') as cfgfile:
cfgfile.write(txt.format(self.opts['player'][1],
self.opts['default_playlist'][1],
self.opts['default_station'][1],
self.opts['default_encoding'][1],
self.opts['connection_timeout'][1],
self.opts['theme'][1],
self.opts['use_transparency'][1],
self.opts['confirm_station_deletion'][1],
self.opts['confirm_playlist_reload'][1],
self.opts['auto_save_playlist'][1]))
except:
if logger.isEnabledFor(logging.ERROR):
logger.error('Error saving config')
return -1
try:
remove(self.config_file + '.restore')
except:
pass
if logger.isEnabledFor(logging.INFO):
logger.info('Config saved')
self.opts['dirty_config'][1] = False
return 0 | python | def save_config(self):
""" Save config file
Creates config.restore (back up file)
Returns:
-1: Error saving config
0: Config saved successfully
1: Config not saved (not modified"""
if not self.opts['dirty_config'][1]:
if logger.isEnabledFor(logging.INFO):
logger.info('Config not saved (not modified)')
return 1
txt ='''# PyRadio Configuration File
# Player selection
# This is the equivalent to the -u , --use-player command line parameter
# Specify the player to use with PyRadio, or the player detection order
# Example:
# player = vlc
# or
# player = vlc,mpv, mplayer
# Default value: mpv,mplayer,vlc
player = {0}
# Default playlist
# This is the playlist to open if none is specified
# You can scecify full path to CSV file, or if the playlist is in the
# config directory, playlist name (filename without extension) or
# playlist number (as reported by -ls command line option)
# Default value: stations
default_playlist = {1}
# Default station
# This is the equivalent to the -p , --play command line parameter
# The station number within the default playlist to play
# Value is 1..number of stations, "-1" or "False" means no auto play
# "0" or "Random" means play a random station
# Default value: False
default_station = {2}
# Default encoding
# This is the encoding used by default when reading data provided by
# a station (such as song title, etc.) If reading said data ends up
# in an error, 'utf-8' will be used instead.
#
# A valid encoding list can be found at:
# https://docs.python.org/2.7/library/codecs.html#standard-encodings
# replacing 2.7 with specific version:
# 3.0 up to current python version.
#
# Default value: utf-8
default_encoding = {3}
# Connection timeout
# PyRadio will wait for this number of seconds to get a station/server
# message indicating that playback has actually started.
# If this does not happen (within this number of seconds after the
# connection is initiated), PyRadio will consider the station
# unreachable, and display the "Failed to connect to: [station]"
# message.
#
# Valid values: 5 - 60
# Default value: 10
connection_timeout = {4}
# Default theme
# Hardcooded themes:
# dark (default) (8 colors)
# light (8 colors)
# dark_16_colors (16 colors dark theme alternative)
# light_16_colors (16 colors light theme alternative)
# black_on_white (bow) (256 colors)
# white_on_black (wob) (256 colors)
# Default value = 'dark'
theme = {5}
# Transparency setting
# If False, theme colors will be used.
# If True and a compositor is running, the stations' window
# background will be transparent. If True and a compositor is
# not running, the terminal's background color will be used.
# Valid values: True, true, False, false
# Default value: False
use_transparency = {6}
# Playlist management
#
# Specify whether you will be asked to confirm
# every station deletion action
# Valid values: True, true, False, false
# Default value: True
confirm_station_deletion = {7}
# Specify whether you will be asked to confirm
# playlist reloading, when the playlist has not
# been modified within Pyradio
# Valid values: True, true, False, false
# Default value: True
confirm_playlist_reload = {8}
# Specify whether you will be asked to save a
# modified playlist whenever it needs saving
# Valid values: True, true, False, false
# Default value: False
auto_save_playlist = {9}
'''
copyfile(self.config_file, self.config_file + '.restore')
if self.opts['default_station'][1] is None:
self.opts['default_station'][1] = '-1'
try:
with open(self.config_file, 'w') as cfgfile:
cfgfile.write(txt.format(self.opts['player'][1],
self.opts['default_playlist'][1],
self.opts['default_station'][1],
self.opts['default_encoding'][1],
self.opts['connection_timeout'][1],
self.opts['theme'][1],
self.opts['use_transparency'][1],
self.opts['confirm_station_deletion'][1],
self.opts['confirm_playlist_reload'][1],
self.opts['auto_save_playlist'][1]))
except:
if logger.isEnabledFor(logging.ERROR):
logger.error('Error saving config')
return -1
try:
remove(self.config_file + '.restore')
except:
pass
if logger.isEnabledFor(logging.INFO):
logger.info('Config saved')
self.opts['dirty_config'][1] = False
return 0 | [
"def",
"save_config",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"opts",
"[",
"'dirty_config'",
"]",
"[",
"1",
"]",
":",
"if",
"logger",
".",
"isEnabledFor",
"(",
"logging",
".",
"INFO",
")",
":",
"logger",
".",
"info",
"(",
"'Config not saved (not modified)'",
")",
"return",
"1",
"txt",
"=",
"'''# PyRadio Configuration File\n\n# Player selection\n# This is the equivalent to the -u , --use-player command line parameter\n# Specify the player to use with PyRadio, or the player detection order\n# Example:\n# player = vlc\n# or\n# player = vlc,mpv, mplayer\n# Default value: mpv,mplayer,vlc\nplayer = {0}\n\n# Default playlist\n# This is the playlist to open if none is specified\n# You can scecify full path to CSV file, or if the playlist is in the\n# config directory, playlist name (filename without extension) or\n# playlist number (as reported by -ls command line option)\n# Default value: stations\ndefault_playlist = {1}\n\n# Default station\n# This is the equivalent to the -p , --play command line parameter\n# The station number within the default playlist to play\n# Value is 1..number of stations, \"-1\" or \"False\" means no auto play\n# \"0\" or \"Random\" means play a random station\n# Default value: False\ndefault_station = {2}\n\n# Default encoding\n# This is the encoding used by default when reading data provided by\n# a station (such as song title, etc.) If reading said data ends up\n# in an error, 'utf-8' will be used instead.\n#\n# A valid encoding list can be found at:\n# https://docs.python.org/2.7/library/codecs.html#standard-encodings\n# replacing 2.7 with specific version:\n# 3.0 up to current python version.\n#\n# Default value: utf-8\ndefault_encoding = {3}\n\n# Connection timeout\n# PyRadio will wait for this number of seconds to get a station/server\n# message indicating that playback has actually started.\n# If this does not happen (within this number of seconds after the\n# connection is initiated), PyRadio will consider the station\n# unreachable, and display the \"Failed to connect to: [station]\"\n# message.\n#\n# Valid values: 5 - 60\n# Default value: 10\nconnection_timeout = {4}\n\n# Default theme\n# Hardcooded themes:\n# dark (default) (8 colors)\n# light (8 colors)\n# dark_16_colors (16 colors dark theme alternative)\n# light_16_colors (16 colors light theme alternative)\n# black_on_white (bow) (256 colors)\n# white_on_black (wob) (256 colors)\n# Default value = 'dark'\ntheme = {5}\n\n# Transparency setting\n# If False, theme colors will be used.\n# If True and a compositor is running, the stations' window\n# background will be transparent. If True and a compositor is\n# not running, the terminal's background color will be used.\n# Valid values: True, true, False, false\n# Default value: False\nuse_transparency = {6}\n\n\n# Playlist management\n#\n# Specify whether you will be asked to confirm\n# every station deletion action\n# Valid values: True, true, False, false\n# Default value: True\nconfirm_station_deletion = {7}\n\n# Specify whether you will be asked to confirm\n# playlist reloading, when the playlist has not\n# been modified within Pyradio\n# Valid values: True, true, False, false\n# Default value: True\nconfirm_playlist_reload = {8}\n\n# Specify whether you will be asked to save a\n# modified playlist whenever it needs saving\n# Valid values: True, true, False, false\n# Default value: False\nauto_save_playlist = {9}\n\n'''",
"copyfile",
"(",
"self",
".",
"config_file",
",",
"self",
".",
"config_file",
"+",
"'.restore'",
")",
"if",
"self",
".",
"opts",
"[",
"'default_station'",
"]",
"[",
"1",
"]",
"is",
"None",
":",
"self",
".",
"opts",
"[",
"'default_station'",
"]",
"[",
"1",
"]",
"=",
"'-1'",
"try",
":",
"with",
"open",
"(",
"self",
".",
"config_file",
",",
"'w'",
")",
"as",
"cfgfile",
":",
"cfgfile",
".",
"write",
"(",
"txt",
".",
"format",
"(",
"self",
".",
"opts",
"[",
"'player'",
"]",
"[",
"1",
"]",
",",
"self",
".",
"opts",
"[",
"'default_playlist'",
"]",
"[",
"1",
"]",
",",
"self",
".",
"opts",
"[",
"'default_station'",
"]",
"[",
"1",
"]",
",",
"self",
".",
"opts",
"[",
"'default_encoding'",
"]",
"[",
"1",
"]",
",",
"self",
".",
"opts",
"[",
"'connection_timeout'",
"]",
"[",
"1",
"]",
",",
"self",
".",
"opts",
"[",
"'theme'",
"]",
"[",
"1",
"]",
",",
"self",
".",
"opts",
"[",
"'use_transparency'",
"]",
"[",
"1",
"]",
",",
"self",
".",
"opts",
"[",
"'confirm_station_deletion'",
"]",
"[",
"1",
"]",
",",
"self",
".",
"opts",
"[",
"'confirm_playlist_reload'",
"]",
"[",
"1",
"]",
",",
"self",
".",
"opts",
"[",
"'auto_save_playlist'",
"]",
"[",
"1",
"]",
")",
")",
"except",
":",
"if",
"logger",
".",
"isEnabledFor",
"(",
"logging",
".",
"ERROR",
")",
":",
"logger",
".",
"error",
"(",
"'Error saving config'",
")",
"return",
"-",
"1",
"try",
":",
"remove",
"(",
"self",
".",
"config_file",
"+",
"'.restore'",
")",
"except",
":",
"pass",
"if",
"logger",
".",
"isEnabledFor",
"(",
"logging",
".",
"INFO",
")",
":",
"logger",
".",
"info",
"(",
"'Config saved'",
")",
"self",
".",
"opts",
"[",
"'dirty_config'",
"]",
"[",
"1",
"]",
"=",
"False",
"return",
"0"
] | Save config file
Creates config.restore (back up file)
Returns:
-1: Error saving config
0: Config saved successfully
1: Config not saved (not modified | [
"Save",
"config",
"file"
] | c5219d350bccbccd49dbd627c1f886a952ea1963 | https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/config.py#L639-L773 | train |
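
`save_config` protects the existing file by copying it to `config.restore` before rewriting it from a formatted template, and removes the backup only after a successful write. The sketch below keeps just that backup/write/cleanup skeleton; the template shows two illustrative keys rather than the full option list above.

```python
from os import remove
from shutil import copyfile

TEMPLATE = '''# Illustrative config template (two keys only)
player = {0}
default_playlist = {1}
'''

def save_config(config_file, player, playlist):
    copyfile(config_file, config_file + '.restore')   # keep a backup
    try:
        with open(config_file, 'w') as out:
            out.write(TEMPLATE.format(player, playlist))
    except (IOError, OSError):
        return -1          # error saving config; backup left in place
    try:
        remove(config_file + '.restore')
    except OSError:
        pass
    return 0               # saved
```
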
coderholic/pyradio | pyradio/radio.py | PyRadio.ctrl_c_handler | def ctrl_c_handler(self, signum, frame):
self.ctrl_c_pressed = True
if self._cnf.dirty_playlist:
""" Try to auto save playlist on exit
Do not check result!!! """
self.saveCurrentPlaylist()
""" Try to auto save config on exit
Do not check result!!! """
self._cnf.save_config() | python | def ctrl_c_handler(self, signum, frame):
self.ctrl_c_pressed = True
if self._cnf.dirty_playlist:
""" Try to auto save playlist on exit
Do not check result!!! """
self.saveCurrentPlaylist()
""" Try to auto save config on exit
Do not check result!!! """
self._cnf.save_config() | [
"def",
"ctrl_c_handler",
"(",
"self",
",",
"signum",
",",
"frame",
")",
":",
"self",
".",
"ctrl_c_pressed",
"=",
"True",
"if",
"self",
".",
"_cnf",
".",
"dirty_playlist",
":",
"\"\"\" Try to auto save playlist on exit\n Do not check result!!! \"\"\"",
"self",
".",
"saveCurrentPlaylist",
"(",
")",
"self",
".",
"_cnf",
".",
"save_config",
"(",
")"
] | Try to auto save config on exit
Do not check result!!! | [
"Try",
"to",
"auto",
"save",
"config",
"on",
"exit",
"Do",
"not",
"check",
"result!!!"
] | c5219d350bccbccd49dbd627c1f886a952ea1963 | https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/radio.py#L432-L440 | train |
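
`ctrl_c_handler` has the `(signum, frame)` signature of a Python signal handler, which suggests it is registered for SIGINT so a modified playlist and the config can be auto-saved on Ctrl-C. A minimal registration sketch; the `App` class is a stand-in, not PyRadio's own class:

```python
import signal

class App(object):
    def __init__(self):
        self.ctrl_c_pressed = False
        signal.signal(signal.SIGINT, self.ctrl_c_handler)  # register for Ctrl-C

    def ctrl_c_handler(self, signum, frame):
        # Flag the interrupt; a real handler would also flush state to disk.
        self.ctrl_c_pressed = True
```
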
coderholic/pyradio | pyradio/radio.py | PyRadio._goto_playing_station | def _goto_playing_station(self, changing_playlist=False):
""" make sure playing station is visible """
if (self.player.isPlaying() or self.operation_mode == PLAYLIST_MODE) and \
(self.selection != self.playing or changing_playlist):
if changing_playlist:
self.startPos = 0
max_lines = self.bodyMaxY - 2
if logger.isEnabledFor(logging.INFO):
logger.info('max_lines = {0}, self.playing = {1}'.format(max_lines, self.playing))
if self.number_of_items < max_lines:
self.startPos = 0
elif self.playing < self.startPos or \
self.playing >= self.startPos + max_lines:
if logger.isEnabledFor(logging.INFO):
logger.info('=== _goto:adjusting startPos')
if self.playing < max_lines:
self.startPos = 0
if self.playing - int(max_lines/2) > 0:
self.startPos = self.playing - int(max_lines/2)
elif self.playing > self.number_of_items - max_lines:
self.startPos = self.number_of_items - max_lines
else:
self.startPos = int(self.playing+1/max_lines) - int(max_lines/2)
if logger.isEnabledFor(logging.INFO):
logger.info('===== _goto:startPos = {0}, changing_playlist = {1}'.format(self.startPos, changing_playlist))
self.selection = self.playing
self.refreshBody() | python | def _goto_playing_station(self, changing_playlist=False):
""" make sure playing station is visible """
if (self.player.isPlaying() or self.operation_mode == PLAYLIST_MODE) and \
(self.selection != self.playing or changing_playlist):
if changing_playlist:
self.startPos = 0
max_lines = self.bodyMaxY - 2
if logger.isEnabledFor(logging.INFO):
logger.info('max_lines = {0}, self.playing = {1}'.format(max_lines, self.playing))
if self.number_of_items < max_lines:
self.startPos = 0
elif self.playing < self.startPos or \
self.playing >= self.startPos + max_lines:
if logger.isEnabledFor(logging.INFO):
logger.info('=== _goto:adjusting startPos')
if self.playing < max_lines:
self.startPos = 0
if self.playing - int(max_lines/2) > 0:
self.startPos = self.playing - int(max_lines/2)
elif self.playing > self.number_of_items - max_lines:
self.startPos = self.number_of_items - max_lines
else:
self.startPos = int(self.playing+1/max_lines) - int(max_lines/2)
if logger.isEnabledFor(logging.INFO):
logger.info('===== _goto:startPos = {0}, changing_playlist = {1}'.format(self.startPos, changing_playlist))
self.selection = self.playing
self.refreshBody() | [
"def",
"_goto_playing_station",
"(",
"self",
",",
"changing_playlist",
"=",
"False",
")",
":",
"if",
"(",
"self",
".",
"player",
".",
"isPlaying",
"(",
")",
"or",
"self",
".",
"operation_mode",
"==",
"PLAYLIST_MODE",
")",
"and",
"(",
"self",
".",
"selection",
"!=",
"self",
".",
"playing",
"or",
"changing_playlist",
")",
":",
"if",
"changing_playlist",
":",
"self",
".",
"startPos",
"=",
"0",
"max_lines",
"=",
"self",
".",
"bodyMaxY",
"-",
"2",
"if",
"logger",
".",
"isEnabledFor",
"(",
"logging",
".",
"INFO",
")",
":",
"logger",
".",
"info",
"(",
"'max_lines = {0}, self.playing = {1}'",
".",
"format",
"(",
"max_lines",
",",
"self",
".",
"playing",
")",
")",
"if",
"self",
".",
"number_of_items",
"<",
"max_lines",
":",
"self",
".",
"startPos",
"=",
"0",
"elif",
"self",
".",
"playing",
"<",
"self",
".",
"startPos",
"or",
"self",
".",
"playing",
">=",
"self",
".",
"startPos",
"+",
"max_lines",
":",
"if",
"logger",
".",
"isEnabledFor",
"(",
"logging",
".",
"INFO",
")",
":",
"logger",
".",
"info",
"(",
"'=== _goto:adjusting startPos'",
")",
"if",
"self",
".",
"playing",
"<",
"max_lines",
":",
"self",
".",
"startPos",
"=",
"0",
"if",
"self",
".",
"playing",
"-",
"int",
"(",
"max_lines",
"/",
"2",
")",
">",
"0",
":",
"self",
".",
"startPos",
"=",
"self",
".",
"playing",
"-",
"int",
"(",
"max_lines",
"/",
"2",
")",
"elif",
"self",
".",
"playing",
">",
"self",
".",
"number_of_items",
"-",
"max_lines",
":",
"self",
".",
"startPos",
"=",
"self",
".",
"number_of_items",
"-",
"max_lines",
"else",
":",
"self",
".",
"startPos",
"=",
"int",
"(",
"self",
".",
"playing",
"+",
"1",
"/",
"max_lines",
")",
"-",
"int",
"(",
"max_lines",
"/",
"2",
")",
"if",
"logger",
".",
"isEnabledFor",
"(",
"logging",
".",
"INFO",
")",
":",
"logger",
".",
"info",
"(",
"'===== _goto:startPos = {0}, changing_playlist = {1}'",
".",
"format",
"(",
"self",
".",
"startPos",
",",
"changing_playlist",
")",
")",
"self",
".",
"selection",
"=",
"self",
".",
"playing",
"self",
".",
"refreshBody",
"(",
")"
] | make sure playing station is visible | [
"make",
"sure",
"playing",
"station",
"is",
"visible"
] | c5219d350bccbccd49dbd627c1f886a952ea1963 | https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/radio.py#L442-L468 | train |
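
The scrolling logic in `_goto_playing_station` adjusts `startPos` so the playing station stays on screen, roughly centred when the list is longer than the window. A compact, clamped version of that offset calculation with a few worked values (function and argument names are illustrative):

```python
def start_pos_for(selection, total_items, visible_lines):
    """Scroll offset that keeps 'selection' visible and roughly centred."""
    if total_items <= visible_lines:
        return 0
    start = selection - visible_lines // 2
    return max(0, min(start, total_items - visible_lines))  # clamp to valid range

print(start_pos_for(2, 4, 10))      # 0   (everything fits)
print(start_pos_for(50, 200, 20))   # 40  (selection centred)
print(start_pos_for(195, 200, 20))  # 180 (clamped to the end of the list)
```
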
coderholic/pyradio | pyradio/radio.py | PyRadio.setStation | def setStation(self, number):
""" Select the given station number """
# If we press up at the first station, we go to the last one
# and if we press down on the last one we go back to the first one.
if number < 0:
number = len(self.stations) - 1
elif number >= len(self.stations):
number = 0
self.selection = number
maxDisplayedItems = self.bodyMaxY - 2
if self.selection - self.startPos >= maxDisplayedItems:
self.startPos = self.selection - maxDisplayedItems + 1
elif self.selection < self.startPos:
self.startPos = self.selection | python | def setStation(self, number):
""" Select the given station number """
# If we press up at the first station, we go to the last one
# and if we press down on the last one we go back to the first one.
if number < 0:
number = len(self.stations) - 1
elif number >= len(self.stations):
number = 0
self.selection = number
maxDisplayedItems = self.bodyMaxY - 2
if self.selection - self.startPos >= maxDisplayedItems:
self.startPos = self.selection - maxDisplayedItems + 1
elif self.selection < self.startPos:
self.startPos = self.selection | [
"def",
"setStation",
"(",
"self",
",",
"number",
")",
":",
"# If we press up at the first station, we go to the last one",
"# and if we press down on the last one we go back to the first one.",
"if",
"number",
"<",
"0",
":",
"number",
"=",
"len",
"(",
"self",
".",
"stations",
")",
"-",
"1",
"elif",
"number",
">=",
"len",
"(",
"self",
".",
"stations",
")",
":",
"number",
"=",
"0",
"self",
".",
"selection",
"=",
"number",
"maxDisplayedItems",
"=",
"self",
".",
"bodyMaxY",
"-",
"2",
"if",
"self",
".",
"selection",
"-",
"self",
".",
"startPos",
">=",
"maxDisplayedItems",
":",
"self",
".",
"startPos",
"=",
"self",
".",
"selection",
"-",
"maxDisplayedItems",
"+",
"1",
"elif",
"self",
".",
"selection",
"<",
"self",
".",
"startPos",
":",
"self",
".",
"startPos",
"=",
"self",
".",
"selection"
] | Select the given station number | [
"Select",
"the",
"given",
"station",
"number"
] | c5219d350bccbccd49dbd627c1f886a952ea1963 | https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/radio.py#L489-L504 | train |
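
`setStation` wraps the index so moving above the first station jumps to the last one and vice versa; a short worked example of that wrap (helper name invented for the example):

```python
def wrap_index(number, count):
    """Wrap a station index at both ends of the list."""
    if number < 0:
        return count - 1
    return 0 if number >= count else number

print(wrap_index(-1, 5))  # 4 (up from the first station lands on the last)
print(wrap_index(5, 5))   # 0 (down from the last station lands on the first)
print(wrap_index(2, 5))   # 2
```
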
coderholic/pyradio | pyradio/radio.py | PyRadio._format_playlist_line | def _format_playlist_line(self, lineNum, pad, station):
""" format playlist line so that if fills self.maxX """
line = "{0}. {1}".format(str(lineNum + self.startPos + 1).rjust(pad), station[0])
f_data = ' [{0}, {1}]'.format(station[2], station[1])
if version_info < (3, 0):
if len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) > self.bodyMaxX -2:
""" this is too long, try to shorten it
by removing file size """
f_data = ' [{0}]'.format(station[1])
if len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) > self.bodyMaxX - 2:
""" still too long. start removing chars """
while len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) > self.bodyMaxX - 3:
f_data = f_data[:-1]
f_data += ']'
""" if too short, pad f_data to the right """
if len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) < self.maxX - 2:
while len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) < self.maxX - 2:
line += ' '
else:
if len(line) + len(f_data) > self.bodyMaxX -2:
""" this is too long, try to shorten it
by removing file size """
f_data = ' [{0}]'.format(station[1])
if len(line) + len(f_data) > self.bodyMaxX - 2:
""" still too long. start removing chars """
while len(line) + len(f_data) > self.bodyMaxX - 3:
f_data = f_data[:-1]
f_data += ']'
""" if too short, pad f_data to the right """
if len(line) + len(f_data) < self.maxX - 2:
while len(line) + len(f_data) < self.maxX - 2:
line += ' '
line += f_data
return line | python | def _format_playlist_line(self, lineNum, pad, station):
""" format playlist line so that if fills self.maxX """
line = "{0}. {1}".format(str(lineNum + self.startPos + 1).rjust(pad), station[0])
f_data = ' [{0}, {1}]'.format(station[2], station[1])
if version_info < (3, 0):
if len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) > self.bodyMaxX -2:
""" this is too long, try to shorten it
by removing file size """
f_data = ' [{0}]'.format(station[1])
if len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) > self.bodyMaxX - 2:
""" still too long. start removing chars """
while len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) > self.bodyMaxX - 3:
f_data = f_data[:-1]
f_data += ']'
""" if too short, pad f_data to the right """
if len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) < self.maxX - 2:
while len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) < self.maxX - 2:
line += ' '
else:
if len(line) + len(f_data) > self.bodyMaxX -2:
""" this is too long, try to shorten it
by removing file size """
f_data = ' [{0}]'.format(station[1])
if len(line) + len(f_data) > self.bodyMaxX - 2:
""" still too long. start removing chars """
while len(line) + len(f_data) > self.bodyMaxX - 3:
f_data = f_data[:-1]
f_data += ']'
""" if too short, pad f_data to the right """
if len(line) + len(f_data) < self.maxX - 2:
while len(line) + len(f_data) < self.maxX - 2:
line += ' '
line += f_data
return line | [
"def",
"_format_playlist_line",
"(",
"self",
",",
"lineNum",
",",
"pad",
",",
"station",
")",
":",
"line",
"=",
"\"{0}. {1}\"",
".",
"format",
"(",
"str",
"(",
"lineNum",
"+",
"self",
".",
"startPos",
"+",
"1",
")",
".",
"rjust",
"(",
"pad",
")",
",",
"station",
"[",
"0",
"]",
")",
"f_data",
"=",
"' [{0}, {1}]'",
".",
"format",
"(",
"station",
"[",
"2",
"]",
",",
"station",
"[",
"1",
"]",
")",
"if",
"version_info",
"<",
"(",
"3",
",",
"0",
")",
":",
"if",
"len",
"(",
"line",
".",
"decode",
"(",
"'utf-8'",
",",
"'replace'",
")",
")",
"+",
"len",
"(",
"f_data",
".",
"decode",
"(",
"'utf-8'",
",",
"'replace'",
")",
")",
">",
"self",
".",
"bodyMaxX",
"-",
"2",
":",
"\"\"\" this is too long, try to shorten it\n by removing file size \"\"\"",
"f_data",
"=",
"' [{0}]'",
".",
"format",
"(",
"station",
"[",
"1",
"]",
")",
"if",
"len",
"(",
"line",
".",
"decode",
"(",
"'utf-8'",
",",
"'replace'",
")",
")",
"+",
"len",
"(",
"f_data",
".",
"decode",
"(",
"'utf-8'",
",",
"'replace'",
")",
")",
">",
"self",
".",
"bodyMaxX",
"-",
"2",
":",
"\"\"\" still too long. start removing chars \"\"\"",
"while",
"len",
"(",
"line",
".",
"decode",
"(",
"'utf-8'",
",",
"'replace'",
")",
")",
"+",
"len",
"(",
"f_data",
".",
"decode",
"(",
"'utf-8'",
",",
"'replace'",
")",
")",
">",
"self",
".",
"bodyMaxX",
"-",
"3",
":",
"f_data",
"=",
"f_data",
"[",
":",
"-",
"1",
"]",
"f_data",
"+=",
"']'",
"\"\"\" if too short, pad f_data to the right \"\"\"",
"if",
"len",
"(",
"line",
".",
"decode",
"(",
"'utf-8'",
",",
"'replace'",
")",
")",
"+",
"len",
"(",
"f_data",
".",
"decode",
"(",
"'utf-8'",
",",
"'replace'",
")",
")",
"<",
"self",
".",
"maxX",
"-",
"2",
":",
"while",
"len",
"(",
"line",
".",
"decode",
"(",
"'utf-8'",
",",
"'replace'",
")",
")",
"+",
"len",
"(",
"f_data",
".",
"decode",
"(",
"'utf-8'",
",",
"'replace'",
")",
")",
"<",
"self",
".",
"maxX",
"-",
"2",
":",
"line",
"+=",
"' '",
"else",
":",
"if",
"len",
"(",
"line",
")",
"+",
"len",
"(",
"f_data",
")",
">",
"self",
".",
"bodyMaxX",
"-",
"2",
":",
"\"\"\" this is too long, try to shorten it\n by removing file size \"\"\"",
"f_data",
"=",
"' [{0}]'",
".",
"format",
"(",
"station",
"[",
"1",
"]",
")",
"if",
"len",
"(",
"line",
")",
"+",
"len",
"(",
"f_data",
")",
">",
"self",
".",
"bodyMaxX",
"-",
"2",
":",
"\"\"\" still too long. start removing chars \"\"\"",
"while",
"len",
"(",
"line",
")",
"+",
"len",
"(",
"f_data",
")",
">",
"self",
".",
"bodyMaxX",
"-",
"3",
":",
"f_data",
"=",
"f_data",
"[",
":",
"-",
"1",
"]",
"f_data",
"+=",
"']'",
"\"\"\" if too short, pad f_data to the right \"\"\"",
"if",
"len",
"(",
"line",
")",
"+",
"len",
"(",
"f_data",
")",
"<",
"self",
".",
"maxX",
"-",
"2",
":",
"while",
"len",
"(",
"line",
")",
"+",
"len",
"(",
"f_data",
")",
"<",
"self",
".",
"maxX",
"-",
"2",
":",
"line",
"+=",
"' '",
"line",
"+=",
"f_data",
"return",
"line"
] | format playlist line so that if fills self.maxX | [
"format",
"playlist",
"line",
"so",
"that",
"if",
"fills",
"self",
".",
"maxX"
] | c5219d350bccbccd49dbd627c1f886a952ea1963 | https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/radio.py#L772-L805 | train |
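
`_format_playlist_line` trims the bracketed date/size suffix character by character and pads the station name so the line fills the window width. A simplified Python 3 sketch of that trim-then-pad idea (widths and strings are examples; the original additionally handles Python 2 byte strings):

```python
def fit_line(name, meta, width):
    """Trim the bracketed metadata, then pad the name toward 'width'."""
    while len(name) + len(meta) > width and len(meta) > 1:
        meta = meta[:-2] + ']'        # drop one char, keep the closing bracket
    name = name.ljust(width - len(meta))
    return name + meta

line = fit_line('1. Example Station', ' [2019-01-01, 1.50 KB]', 40)
print(repr(line), len(line))          # padded/trimmed to 40 characters
```
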
coderholic/pyradio | pyradio/config_window.py | PyRadioSelectEncodings._resize | def _resize(self, init=False):
col, row = self._selection_to_col_row(self.selection)
if not (self.startPos <= row <= self.startPos + self.list_maxY - 1):
while row > self.startPos:
self.startPos += 1
while row < self.startPos + self.list_maxY - 1:
self.startPos -= 1
''' if the selection at the end of the list,
try to scroll down '''
if init and row > self.list_maxY:
new_startPos = self._num_of_rows - self.list_maxY + 1
if row > new_startPos:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('setting startPos at {}'.format(new_startPos))
self.startPos = new_startPos
self.refresh_selection() | python | def _resize(self, init=False):
col, row = self._selection_to_col_row(self.selection)
if not (self.startPos <= row <= self.startPos + self.list_maxY - 1):
while row > self.startPos:
self.startPos += 1
while row < self.startPos + self.list_maxY - 1:
self.startPos -= 1
''' if the selection at the end of the list,
try to scroll down '''
if init and row > self.list_maxY:
new_startPos = self._num_of_rows - self.list_maxY + 1
if row > new_startPos:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('setting startPos at {}'.format(new_startPos))
self.startPos = new_startPos
self.refresh_selection() | [
"def",
"_resize",
"(",
"self",
",",
"init",
"=",
"False",
")",
":",
"col",
",",
"row",
"=",
"self",
".",
"_selection_to_col_row",
"(",
"self",
".",
"selection",
")",
"if",
"not",
"(",
"self",
".",
"startPos",
"<=",
"row",
"<=",
"self",
".",
"startPos",
"+",
"self",
".",
"list_maxY",
"-",
"1",
")",
":",
"while",
"row",
">",
"self",
".",
"startPos",
":",
"self",
".",
"startPos",
"+=",
"1",
"while",
"row",
"<",
"self",
".",
"startPos",
"+",
"self",
".",
"list_maxY",
"-",
"1",
":",
"self",
".",
"startPos",
"-=",
"1",
"if",
"init",
"and",
"row",
">",
"self",
".",
"list_maxY",
":",
"new_startPos",
"=",
"self",
".",
"_num_of_rows",
"-",
"self",
".",
"list_maxY",
"+",
"1",
"if",
"row",
">",
"new_startPos",
":",
"if",
"logger",
".",
"isEnabledFor",
"(",
"logging",
".",
"DEBUG",
")",
":",
"logger",
".",
"debug",
"(",
"'setting startPos at {}'",
".",
"format",
"(",
"new_startPos",
")",
")",
"self",
".",
"startPos",
"=",
"new_startPos",
"self",
".",
"refresh_selection",
"(",
")"
] | if the selection at the end of the list,
try to scroll down | [
"if",
"the",
"selection",
"at",
"the",
"end",
"of",
"the",
"list",
"try",
"to",
"scroll",
"down"
] | c5219d350bccbccd49dbd627c1f886a952ea1963 | https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/config_window.py#L745-L760 | train |
coderholic/pyradio | pyradio/simple_curses_widgets.py | SimpleCursesLineEdit._get_char | def _get_char(self, win, char):
def get_check_next_byte():
char = win.getch()
if 128 <= char <= 191:
return char
else:
raise UnicodeError
bytes = []
if char <= 127:
# 1 bytes
bytes.append(char)
#elif 194 <= char <= 223:
elif 192 <= char <= 223:
# 2 bytes
bytes.append(char)
bytes.append(get_check_next_byte())
elif 224 <= char <= 239:
# 3 bytes
bytes.append(char)
bytes.append(get_check_next_byte())
bytes.append(get_check_next_byte())
elif 240 <= char <= 244:
# 4 bytes
bytes.append(char)
bytes.append(get_check_next_byte())
bytes.append(get_check_next_byte())
bytes.append(get_check_next_byte())
#print('bytes = {}'.format(bytes))
""" no zero byte allowed """
while 0 in bytes:
bytes.remove(0)
if version_info < (3, 0):
out = ''.join([chr(b) for b in bytes])
else:
buf = bytearray(bytes)
out = self._decode_string(buf)
#out = buf.decode('utf-8')
return out | python | def _get_char(self, win, char):
def get_check_next_byte():
char = win.getch()
if 128 <= char <= 191:
return char
else:
raise UnicodeError
bytes = []
if char <= 127:
# 1 bytes
bytes.append(char)
#elif 194 <= char <= 223:
elif 192 <= char <= 223:
# 2 bytes
bytes.append(char)
bytes.append(get_check_next_byte())
elif 224 <= char <= 239:
# 3 bytes
bytes.append(char)
bytes.append(get_check_next_byte())
bytes.append(get_check_next_byte())
elif 240 <= char <= 244:
# 4 bytes
bytes.append(char)
bytes.append(get_check_next_byte())
bytes.append(get_check_next_byte())
bytes.append(get_check_next_byte())
#print('bytes = {}'.format(bytes))
""" no zero byte allowed """
while 0 in bytes:
bytes.remove(0)
if version_info < (3, 0):
out = ''.join([chr(b) for b in bytes])
else:
buf = bytearray(bytes)
out = self._decode_string(buf)
#out = buf.decode('utf-8')
return out | [
"def",
"_get_char",
"(",
"self",
",",
"win",
",",
"char",
")",
":",
"def",
"get_check_next_byte",
"(",
")",
":",
"char",
"=",
"win",
".",
"getch",
"(",
")",
"if",
"128",
"<=",
"char",
"<=",
"191",
":",
"return",
"char",
"else",
":",
"raise",
"UnicodeError",
"bytes",
"=",
"[",
"]",
"if",
"char",
"<=",
"127",
":",
"# 1 bytes",
"bytes",
".",
"append",
"(",
"char",
")",
"#elif 194 <= char <= 223:",
"elif",
"192",
"<=",
"char",
"<=",
"223",
":",
"# 2 bytes",
"bytes",
".",
"append",
"(",
"char",
")",
"bytes",
".",
"append",
"(",
"get_check_next_byte",
"(",
")",
")",
"elif",
"224",
"<=",
"char",
"<=",
"239",
":",
"# 3 bytes",
"bytes",
".",
"append",
"(",
"char",
")",
"bytes",
".",
"append",
"(",
"get_check_next_byte",
"(",
")",
")",
"bytes",
".",
"append",
"(",
"get_check_next_byte",
"(",
")",
")",
"elif",
"240",
"<=",
"char",
"<=",
"244",
":",
"# 4 bytes",
"bytes",
".",
"append",
"(",
"char",
")",
"bytes",
".",
"append",
"(",
"get_check_next_byte",
"(",
")",
")",
"bytes",
".",
"append",
"(",
"get_check_next_byte",
"(",
")",
")",
"bytes",
".",
"append",
"(",
"get_check_next_byte",
"(",
")",
")",
"#print('bytes = {}'.format(bytes))",
"while",
"0",
"in",
"bytes",
":",
"bytes",
".",
"remove",
"(",
"0",
")",
"if",
"version_info",
"<",
"(",
"3",
",",
"0",
")",
":",
"out",
"=",
"''",
".",
"join",
"(",
"[",
"chr",
"(",
"b",
")",
"for",
"b",
"in",
"bytes",
"]",
")",
"else",
":",
"buf",
"=",
"bytearray",
"(",
"bytes",
")",
"out",
"=",
"self",
".",
"_decode_string",
"(",
"buf",
")",
"#out = buf.decode('utf-8')",
"return",
"out"
] | no zero byte allowed | [
"no",
"zero",
"byte",
"allowed"
] | c5219d350bccbccd49dbd627c1f886a952ea1963 | https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/simple_curses_widgets.py#L342-L380 | train |
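
`_get_char` decides from the lead byte how many UTF-8 continuation bytes to read from the curses window before decoding. The same lead-byte ranges, shown as a standalone Python 3 example over a byte string instead of `win.getch()` calls:

```python
def utf8_length(lead):
    """Number of bytes in a UTF-8 sequence, judged from the lead byte."""
    if lead <= 0x7F:
        return 1
    if 0xC0 <= lead <= 0xDF:
        return 2
    if 0xE0 <= lead <= 0xEF:
        return 3
    if 0xF0 <= lead <= 0xF4:
        return 4
    raise UnicodeError('invalid lead byte: %d' % lead)

data = 'α'.encode('utf-8')              # b'\xce\xb1'
n = utf8_length(data[0])                # 2
print(data[:n].decode('utf-8'))         # α
```
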
coderholic/pyradio | pyradio/edit.py | PyRadioSearch._get_history_next | def _get_history_next(self):
""" callback function for key down """
if self._has_history:
ret = self._input_history.return_history(1)
self.string = ret
self._curs_pos = len(ret) | python | def _get_history_next(self):
""" callback function for key down """
if self._has_history:
ret = self._input_history.return_history(1)
self.string = ret
self._curs_pos = len(ret) | [
"def",
"_get_history_next",
"(",
"self",
")",
":",
"if",
"self",
".",
"_has_history",
":",
"ret",
"=",
"self",
".",
"_input_history",
".",
"return_history",
"(",
"1",
")",
"self",
".",
"string",
"=",
"ret",
"self",
".",
"_curs_pos",
"=",
"len",
"(",
"ret",
")"
] | callback function for key down | [
"callback",
"function",
"for",
"key",
"down"
] | c5219d350bccbccd49dbd627c1f886a952ea1963 | https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/edit.py#L49-L54 | train |
bids-standard/pybids | bids/analysis/analysis.py | apply_transformations | def apply_transformations(collection, transformations, select=None):
''' Apply all transformations to the variables in the collection.
Args:
transformations (list): List of transformations to apply.
select (list): Optional list of names of variables to retain after all
transformations are applied.
'''
for t in transformations:
kwargs = dict(t)
func = kwargs.pop('name')
cols = kwargs.pop('input', None)
if isinstance(func, string_types):
if func in ('and', 'or'):
func += '_'
if not hasattr(transform, func):
raise ValueError("No transformation '%s' found!" % func)
func = getattr(transform, func)
func(collection, cols, **kwargs)
if select is not None:
transform.Select(collection, select)
return collection | python | def apply_transformations(collection, transformations, select=None):
''' Apply all transformations to the variables in the collection.
Args:
transformations (list): List of transformations to apply.
select (list): Optional list of names of variables to retain after all
transformations are applied.
'''
for t in transformations:
kwargs = dict(t)
func = kwargs.pop('name')
cols = kwargs.pop('input', None)
if isinstance(func, string_types):
if func in ('and', 'or'):
func += '_'
if not hasattr(transform, func):
raise ValueError("No transformation '%s' found!" % func)
func = getattr(transform, func)
func(collection, cols, **kwargs)
if select is not None:
transform.Select(collection, select)
return collection | [
"def",
"apply_transformations",
"(",
"collection",
",",
"transformations",
",",
"select",
"=",
"None",
")",
":",
"for",
"t",
"in",
"transformations",
":",
"kwargs",
"=",
"dict",
"(",
"t",
")",
"func",
"=",
"kwargs",
".",
"pop",
"(",
"'name'",
")",
"cols",
"=",
"kwargs",
".",
"pop",
"(",
"'input'",
",",
"None",
")",
"if",
"isinstance",
"(",
"func",
",",
"string_types",
")",
":",
"if",
"func",
"in",
"(",
"'and'",
",",
"'or'",
")",
":",
"func",
"+=",
"'_'",
"if",
"not",
"hasattr",
"(",
"transform",
",",
"func",
")",
":",
"raise",
"ValueError",
"(",
"\"No transformation '%s' found!\"",
"%",
"func",
")",
"func",
"=",
"getattr",
"(",
"transform",
",",
"func",
")",
"func",
"(",
"collection",
",",
"cols",
",",
"*",
"*",
"kwargs",
")",
"if",
"select",
"is",
"not",
"None",
":",
"transform",
".",
"Select",
"(",
"collection",
",",
"select",
")",
"return",
"collection"
] | Apply all transformations to the variables in the collection.
Args:
transformations (list): List of transformations to apply.
select (list): Optional list of names of variables to retain after all
transformations are applied. | [
"Apply",
"all",
"transformations",
"to",
"the",
"variables",
"in",
"the",
"collection",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/analysis.py#L489-L513 | train |
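
`apply_transformations` dispatches each transformation by name with `getattr`, after renaming `and`/`or` to `and_`/`or_` because the plain forms are Python keywords. A self-contained sketch of that dispatch pattern with a stand-in registry (the class and transform names below are invented for the example):

```python
class Registry(object):
    """Stand-in for the transformations module."""
    @staticmethod
    def scale(collection, cols):
        print('scale %s in %r' % (cols, collection))

    @staticmethod
    def and_(collection, cols):
        print('logical AND over %s' % cols)

def apply_by_name(registry, collection, spec):
    name = spec['name']
    if name in ('and', 'or'):            # keywords get a trailing underscore
        name += '_'
    if not hasattr(registry, name):
        raise ValueError("No transformation '%s' found!" % name)
    getattr(registry, name)(collection, spec.get('input'))

apply_by_name(Registry, 'collection-1', {'name': 'scale', 'input': ['RT']})
apply_by_name(Registry, 'collection-1', {'name': 'and', 'input': ['A', 'B']})
```
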
bids-standard/pybids | bids/analysis/analysis.py | Analysis.setup | def setup(self, steps=None, drop_na=False, **kwargs):
''' Set up the sequence of steps for analysis.
Args:
steps (list): Optional list of steps to set up. Each element
must be either an int giving the index of the step in the
JSON config block list, or a str giving the (unique) name of
the step, as specified in the JSON config. Steps that do not
match either index or name will be skipped.
drop_na (bool): Boolean indicating whether or not to automatically
drop events that have a n/a amplitude when reading in data
from event files.
'''
# In the beginning, there was nothing
input_nodes = None
# Use inputs from model, and update with kwargs
selectors = self.model.get('input', {}).copy()
selectors.update(kwargs)
for i, b in enumerate(self.steps):
# Skip any steps whose names or indexes don't match block list
if steps is not None and i not in steps and b.name not in steps:
continue
b.setup(input_nodes, drop_na=drop_na, **selectors)
input_nodes = b.output_nodes | python | def setup(self, steps=None, drop_na=False, **kwargs):
''' Set up the sequence of steps for analysis.
Args:
steps (list): Optional list of steps to set up. Each element
must be either an int giving the index of the step in the
JSON config block list, or a str giving the (unique) name of
the step, as specified in the JSON config. Steps that do not
match either index or name will be skipped.
drop_na (bool): Boolean indicating whether or not to automatically
drop events that have a n/a amplitude when reading in data
from event files.
'''
# In the beginning, there was nothing
input_nodes = None
# Use inputs from model, and update with kwargs
selectors = self.model.get('input', {}).copy()
selectors.update(kwargs)
for i, b in enumerate(self.steps):
# Skip any steps whose names or indexes don't match block list
if steps is not None and i not in steps and b.name not in steps:
continue
b.setup(input_nodes, drop_na=drop_na, **selectors)
input_nodes = b.output_nodes | [
"def",
"setup",
"(",
"self",
",",
"steps",
"=",
"None",
",",
"drop_na",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"# In the beginning, there was nothing",
"input_nodes",
"=",
"None",
"# Use inputs from model, and update with kwargs",
"selectors",
"=",
"self",
".",
"model",
".",
"get",
"(",
"'input'",
",",
"{",
"}",
")",
".",
"copy",
"(",
")",
"selectors",
".",
"update",
"(",
"kwargs",
")",
"for",
"i",
",",
"b",
"in",
"enumerate",
"(",
"self",
".",
"steps",
")",
":",
"# Skip any steps whose names or indexes don't match block list",
"if",
"steps",
"is",
"not",
"None",
"and",
"i",
"not",
"in",
"steps",
"and",
"b",
".",
"name",
"not",
"in",
"steps",
":",
"continue",
"b",
".",
"setup",
"(",
"input_nodes",
",",
"drop_na",
"=",
"drop_na",
",",
"*",
"*",
"selectors",
")",
"input_nodes",
"=",
"b",
".",
"output_nodes"
] | Set up the sequence of steps for analysis.
Args:
steps (list): Optional list of steps to set up. Each element
must be either an int giving the index of the step in the
JSON config block list, or a str giving the (unique) name of
the step, as specified in the JSON config. Steps that do not
match either index or name will be skipped.
drop_na (bool): Boolean indicating whether or not to automatically
drop events that have a n/a amplitude when reading in data
from event files. | [
"Set",
"up",
"the",
"sequence",
"of",
"steps",
"for",
"analysis",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/analysis.py#L62-L90 | train |
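
`Analysis.setup` threads the steps together: each step receives the previous step's `output_nodes` as its inputs, starting from `None`. A toy illustration of that chaining with a fake step class (names are invented; selector handling is omitted):

```python
class FakeStep(object):
    """Minimal stand-in for a Step, just to show the chaining."""
    def __init__(self, name):
        self.name = name
        self.output_nodes = []

    def setup(self, input_nodes):
        print('%s received %d input node(s)' % (self.name, len(input_nodes or [])))
        self.output_nodes = ['%s-node' % self.name]

input_nodes = None
for step in [FakeStep('run'), FakeStep('session'), FakeStep('subject')]:
    step.setup(input_nodes)
    input_nodes = step.output_nodes   # outputs feed the next step
```
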
bids-standard/pybids | bids/analysis/analysis.py | Step.setup | def setup(self, input_nodes=None, drop_na=False, **kwargs):
''' Set up the Step and construct the design matrix.
Args:
input_nodes (list): Optional list of Node objects produced by
the preceding Step in the analysis. If None, uses any inputs
passed in at Step initialization.
drop_na (bool): Boolean indicating whether or not to automatically
drop events that have a n/a amplitude when reading in data
from event files.
kwargs: Optional keyword arguments to pass onto load_variables.
'''
self.output_nodes = []
input_nodes = input_nodes or self.input_nodes or []
# TODO: remove the scan_length argument entirely once we switch tests
# to use the synthetic dataset with image headers.
if self.level != 'run':
kwargs = kwargs.copy()
kwargs.pop('scan_length', None)
collections = self.layout.get_collections(self.level, drop_na=drop_na,
**kwargs)
objects = collections + input_nodes
objects, kwargs = self._filter_objects(objects, kwargs)
groups = self._group_objects(objects)
# Set up and validate variable lists
model = self.model or {}
X = model.get('x', [])
for grp in groups:
# Split into separate lists of Collections and Nodes
input_nodes = [o for o in grp if isinstance(o, AnalysisNode)]
colls = list(set(grp) - set(input_nodes))
if input_nodes:
node_coll = self._concatenate_input_nodes(input_nodes)
colls.append(node_coll)
coll = merge_collections(colls) if len(colls) > 1 else colls[0]
coll = apply_transformations(coll, self.transformations)
if X:
transform.Select(coll, X)
node = AnalysisNode(self.level, coll, self.contrasts, input_nodes,
self.auto_contrasts)
self.output_nodes.append(node) | python | def setup(self, input_nodes=None, drop_na=False, **kwargs):
''' Set up the Step and construct the design matrix.
Args:
input_nodes (list): Optional list of Node objects produced by
the preceding Step in the analysis. If None, uses any inputs
passed in at Step initialization.
drop_na (bool): Boolean indicating whether or not to automatically
drop events that have a n/a amplitude when reading in data
from event files.
kwargs: Optional keyword arguments to pass onto load_variables.
'''
self.output_nodes = []
input_nodes = input_nodes or self.input_nodes or []
# TODO: remove the scan_length argument entirely once we switch tests
# to use the synthetic dataset with image headers.
if self.level != 'run':
kwargs = kwargs.copy()
kwargs.pop('scan_length', None)
collections = self.layout.get_collections(self.level, drop_na=drop_na,
**kwargs)
objects = collections + input_nodes
objects, kwargs = self._filter_objects(objects, kwargs)
groups = self._group_objects(objects)
# Set up and validate variable lists
model = self.model or {}
X = model.get('x', [])
for grp in groups:
# Split into separate lists of Collections and Nodes
input_nodes = [o for o in grp if isinstance(o, AnalysisNode)]
colls = list(set(grp) - set(input_nodes))
if input_nodes:
node_coll = self._concatenate_input_nodes(input_nodes)
colls.append(node_coll)
coll = merge_collections(colls) if len(colls) > 1 else colls[0]
coll = apply_transformations(coll, self.transformations)
if X:
transform.Select(coll, X)
node = AnalysisNode(self.level, coll, self.contrasts, input_nodes,
self.auto_contrasts)
self.output_nodes.append(node) | [
"def",
"setup",
"(",
"self",
",",
"input_nodes",
"=",
"None",
",",
"drop_na",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"output_nodes",
"=",
"[",
"]",
"input_nodes",
"=",
"input_nodes",
"or",
"self",
".",
"input_nodes",
"or",
"[",
"]",
"# TODO: remove the scan_length argument entirely once we switch tests",
"# to use the synthetic dataset with image headers.",
"if",
"self",
".",
"level",
"!=",
"'run'",
":",
"kwargs",
"=",
"kwargs",
".",
"copy",
"(",
")",
"kwargs",
".",
"pop",
"(",
"'scan_length'",
",",
"None",
")",
"collections",
"=",
"self",
".",
"layout",
".",
"get_collections",
"(",
"self",
".",
"level",
",",
"drop_na",
"=",
"drop_na",
",",
"*",
"*",
"kwargs",
")",
"objects",
"=",
"collections",
"+",
"input_nodes",
"objects",
",",
"kwargs",
"=",
"self",
".",
"_filter_objects",
"(",
"objects",
",",
"kwargs",
")",
"groups",
"=",
"self",
".",
"_group_objects",
"(",
"objects",
")",
"# Set up and validate variable lists",
"model",
"=",
"self",
".",
"model",
"or",
"{",
"}",
"X",
"=",
"model",
".",
"get",
"(",
"'x'",
",",
"[",
"]",
")",
"for",
"grp",
"in",
"groups",
":",
"# Split into separate lists of Collections and Nodes",
"input_nodes",
"=",
"[",
"o",
"for",
"o",
"in",
"grp",
"if",
"isinstance",
"(",
"o",
",",
"AnalysisNode",
")",
"]",
"colls",
"=",
"list",
"(",
"set",
"(",
"grp",
")",
"-",
"set",
"(",
"input_nodes",
")",
")",
"if",
"input_nodes",
":",
"node_coll",
"=",
"self",
".",
"_concatenate_input_nodes",
"(",
"input_nodes",
")",
"colls",
".",
"append",
"(",
"node_coll",
")",
"coll",
"=",
"merge_collections",
"(",
"colls",
")",
"if",
"len",
"(",
"colls",
")",
">",
"1",
"else",
"colls",
"[",
"0",
"]",
"coll",
"=",
"apply_transformations",
"(",
"coll",
",",
"self",
".",
"transformations",
")",
"if",
"X",
":",
"transform",
".",
"Select",
"(",
"coll",
",",
"X",
")",
"node",
"=",
"AnalysisNode",
"(",
"self",
".",
"level",
",",
"coll",
",",
"self",
".",
"contrasts",
",",
"input_nodes",
",",
"self",
".",
"auto_contrasts",
")",
"self",
".",
"output_nodes",
".",
"append",
"(",
"node",
")"
] | Set up the Step and construct the design matrix.
Args:
input_nodes (list): Optional list of Node objects produced by
the preceding Step in the analysis. If None, uses any inputs
passed in at Step initialization.
drop_na (bool): Boolean indicating whether or not to automatically
drop events that have a n/a amplitude when reading in data
from event files.
kwargs: Optional keyword arguments to pass onto load_variables. | [
"Set",
"up",
"the",
"Step",
"and",
"construct",
"the",
"design",
"matrix",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/analysis.py#L168-L218 | train |
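Step.setup above is normally driven by Analysis.setup rather than called directly; the following rough sketch of direct use continues from the Analysis example earlier. The scan_length value is illustrative and is only needed when the run duration cannot be read from the image headers.

# The first step in the model is typically the run-level step.
first_step = analysis.steps[0]
first_step.setup(input_nodes=None, drop_na=False, scan_length=480)
print(len(first_step.output_nodes))  # one AnalysisNode per grouped run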
bids-standard/pybids | bids/reports/utils.py | get_slice_info | def get_slice_info(slice_times):
"""
Extract slice order from slice timing info.
TODO: Be more specific with slice orders.
Currently anything where there's some kind of skipping is interpreted as
interleaved of some kind.
Parameters
----------
slice_times : array-like
A list of slice times in seconds or milliseconds or whatever.
Returns
-------
slice_order_name : :obj:`str`
The name of the slice order sequence.
"""
# Slice order
slice_times = remove_duplicates(slice_times)
slice_order = sorted(range(len(slice_times)), key=lambda k: slice_times[k])
if slice_order == range(len(slice_order)):
slice_order_name = 'sequential ascending'
elif slice_order == reversed(range(len(slice_order))):
slice_order_name = 'sequential descending'
elif slice_order[0] < slice_order[1]:
# We're allowing some wiggle room on interleaved.
slice_order_name = 'interleaved ascending'
elif slice_order[0] > slice_order[1]:
slice_order_name = 'interleaved descending'
else:
slice_order = [str(s) for s in slice_order]
raise Exception('Unknown slice order: [{0}]'.format(', '.join(slice_order)))
return slice_order_name | python | def get_slice_info(slice_times):
"""
Extract slice order from slice timing info.
TODO: Be more specific with slice orders.
Currently anything where there's some kind of skipping is interpreted as
interleaved of some kind.
Parameters
----------
slice_times : array-like
A list of slice times in seconds or milliseconds or whatever.
Returns
-------
slice_order_name : :obj:`str`
The name of the slice order sequence.
"""
# Slice order
slice_times = remove_duplicates(slice_times)
slice_order = sorted(range(len(slice_times)), key=lambda k: slice_times[k])
if slice_order == range(len(slice_order)):
slice_order_name = 'sequential ascending'
elif slice_order == reversed(range(len(slice_order))):
slice_order_name = 'sequential descending'
elif slice_order[0] < slice_order[1]:
# We're allowing some wiggle room on interleaved.
slice_order_name = 'interleaved ascending'
elif slice_order[0] > slice_order[1]:
slice_order_name = 'interleaved descending'
else:
slice_order = [str(s) for s in slice_order]
raise Exception('Unknown slice order: [{0}]'.format(', '.join(slice_order)))
return slice_order_name | [
"def",
"get_slice_info",
"(",
"slice_times",
")",
":",
"# Slice order",
"slice_times",
"=",
"remove_duplicates",
"(",
"slice_times",
")",
"slice_order",
"=",
"sorted",
"(",
"range",
"(",
"len",
"(",
"slice_times",
")",
")",
",",
"key",
"=",
"lambda",
"k",
":",
"slice_times",
"[",
"k",
"]",
")",
"if",
"slice_order",
"==",
"range",
"(",
"len",
"(",
"slice_order",
")",
")",
":",
"slice_order_name",
"=",
"'sequential ascending'",
"elif",
"slice_order",
"==",
"reversed",
"(",
"range",
"(",
"len",
"(",
"slice_order",
")",
")",
")",
":",
"slice_order_name",
"=",
"'sequential descending'",
"elif",
"slice_order",
"[",
"0",
"]",
"<",
"slice_order",
"[",
"1",
"]",
":",
"# We're allowing some wiggle room on interleaved.",
"slice_order_name",
"=",
"'interleaved ascending'",
"elif",
"slice_order",
"[",
"0",
"]",
">",
"slice_order",
"[",
"1",
"]",
":",
"slice_order_name",
"=",
"'interleaved descending'",
"else",
":",
"slice_order",
"=",
"[",
"str",
"(",
"s",
")",
"for",
"s",
"in",
"slice_order",
"]",
"raise",
"Exception",
"(",
"'Unknown slice order: [{0}]'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"slice_order",
")",
")",
")",
"return",
"slice_order_name"
] | Extract slice order from slice timing info.
TODO: Be more specific with slice orders.
Currently anything where there's some kind of skipping is interpreted as
interleaved of some kind.
Parameters
----------
slice_times : array-like
A list of slice times in seconds or milliseconds or whatever.
Returns
-------
slice_order_name : :obj:`str`
The name of the slice order sequence. | [
"Extract",
"slice",
"order",
"from",
"slice",
"timing",
"info",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/reports/utils.py#L70-L104 | train |
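A small sketch of get_slice_info with a made-up slice-timing vector (even slices acquired first, then odd), so the returned label can be checked by hand against the logic shown above.

from bids.reports.utils import get_slice_info

# Six slices, acquisition order 0, 2, 4, 1, 3, 5; onsets in seconds.
slice_times = [0.0, 0.3, 0.1, 0.4, 0.2, 0.5]
print(get_slice_info(slice_times))  # expected: 'interleaved ascending'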
bids-standard/pybids | bids/reports/utils.py | get_sizestr | def get_sizestr(img):
"""
Extract and reformat voxel size, matrix size, field of view, and number of
slices into pretty strings.
Parameters
----------
img : :obj:`nibabel.Nifti1Image`
Image from scan from which to derive parameters.
Returns
-------
n_slices : :obj:`int`
Number of slices.
voxel_size : :obj:`str`
Voxel size string (e.g., '2x2x2')
matrix_size : :obj:`str`
Matrix size string (e.g., '128x128')
fov : :obj:`str`
Field of view string (e.g., '256x256')
"""
n_x, n_y, n_slices = img.shape[:3]
import numpy as np
voxel_dims = np.array(img.header.get_zooms()[:3])
matrix_size = '{0}x{1}'.format(num_to_str(n_x), num_to_str(n_y))
voxel_size = 'x'.join([num_to_str(s) for s in voxel_dims])
fov = [n_x, n_y] * voxel_dims[:2]
fov = 'x'.join([num_to_str(s) for s in fov])
return n_slices, voxel_size, matrix_size, fov | python | def get_sizestr(img):
"""
Extract and reformat voxel size, matrix size, field of view, and number of
slices into pretty strings.
Parameters
----------
img : :obj:`nibabel.Nifti1Image`
Image from scan from which to derive parameters.
Returns
-------
n_slices : :obj:`int`
Number of slices.
voxel_size : :obj:`str`
Voxel size string (e.g., '2x2x2')
matrix_size : :obj:`str`
Matrix size string (e.g., '128x128')
fov : :obj:`str`
Field of view string (e.g., '256x256')
"""
n_x, n_y, n_slices = img.shape[:3]
import numpy as np
voxel_dims = np.array(img.header.get_zooms()[:3])
matrix_size = '{0}x{1}'.format(num_to_str(n_x), num_to_str(n_y))
voxel_size = 'x'.join([num_to_str(s) for s in voxel_dims])
fov = [n_x, n_y] * voxel_dims[:2]
fov = 'x'.join([num_to_str(s) for s in fov])
return n_slices, voxel_size, matrix_size, fov | [
"def",
"get_sizestr",
"(",
"img",
")",
":",
"n_x",
",",
"n_y",
",",
"n_slices",
"=",
"img",
".",
"shape",
"[",
":",
"3",
"]",
"import",
"numpy",
"as",
"np",
"voxel_dims",
"=",
"np",
".",
"array",
"(",
"img",
".",
"header",
".",
"get_zooms",
"(",
")",
"[",
":",
"3",
"]",
")",
"matrix_size",
"=",
"'{0}x{1}'",
".",
"format",
"(",
"num_to_str",
"(",
"n_x",
")",
",",
"num_to_str",
"(",
"n_y",
")",
")",
"voxel_size",
"=",
"'x'",
".",
"join",
"(",
"[",
"num_to_str",
"(",
"s",
")",
"for",
"s",
"in",
"voxel_dims",
"]",
")",
"fov",
"=",
"[",
"n_x",
",",
"n_y",
"]",
"*",
"voxel_dims",
"[",
":",
"2",
"]",
"fov",
"=",
"'x'",
".",
"join",
"(",
"[",
"num_to_str",
"(",
"s",
")",
"for",
"s",
"in",
"fov",
"]",
")",
"return",
"n_slices",
",",
"voxel_size",
",",
"matrix_size",
",",
"fov"
] | Extract and reformat voxel size, matrix size, field of view, and number of
slices into pretty strings.
Parameters
----------
img : :obj:`nibabel.Nifti1Image`
Image from scan from which to derive parameters.
Returns
-------
n_slices : :obj:`int`
Number of slices.
voxel_size : :obj:`str`
Voxel size string (e.g., '2x2x2')
matrix_size : :obj:`str`
Matrix size string (e.g., '128x128')
fov : :obj:`str`
Field of view string (e.g., '256x256') | [
"Extract",
"and",
"reformat",
"voxel",
"size",
"matrix",
"size",
"field",
"of",
"view",
"and",
"number",
"of",
"slices",
"into",
"pretty",
"strings",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/reports/utils.py#L138-L166 | train |
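A sketch of get_sizestr on a synthetic image, so the numbers are easy to verify; it uses nibabel to build an in-memory NIfTI with 2 mm isotropic voxels (all values illustrative).

import numpy as np
import nibabel as nib
from bids.reports.utils import get_sizestr

# 64 x 64 matrix, 30 slices, 10 volumes, 2 mm isotropic voxels.
affine = np.diag([2.0, 2.0, 2.0, 1.0])
img = nib.Nifti1Image(np.zeros((64, 64, 30, 10), dtype=np.float32), affine)

n_slices, voxel_size, matrix_size, fov = get_sizestr(img)
# Expected (given nibabel derives zooms from the affine):
# n_slices == 30, voxel_size == '2x2x2', matrix_size == '64x64', fov == '128x128'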
bids-standard/pybids | bids/layout/layout.py | add_config_paths | def add_config_paths(**kwargs):
""" Add to the pool of available configuration files for BIDSLayout.
Args:
kwargs: dictionary specifying where to find additional config files.
Keys are names, values are paths to the corresponding .json file.
Example:
> add_config_paths(my_config='/path/to/config')
> layout = BIDSLayout('/path/to/bids', config=['bids', 'my_config'])
"""
for k, path in kwargs.items():
if not os.path.exists(path):
raise ValueError(
'Configuration file "{}" does not exist'.format(k))
if k in cf.get_option('config_paths'):
raise ValueError('Configuration {!r} already exists'.format(k))
kwargs.update(**cf.get_option('config_paths'))
cf.set_option('config_paths', kwargs) | python | def add_config_paths(**kwargs):
""" Add to the pool of available configuration files for BIDSLayout.
Args:
kwargs: dictionary specifying where to find additional config files.
Keys are names, values are paths to the corresponding .json file.
Example:
> add_config_paths(my_config='/path/to/config')
> layout = BIDSLayout('/path/to/bids', config=['bids', 'my_config'])
"""
for k, path in kwargs.items():
if not os.path.exists(path):
raise ValueError(
'Configuration file "{}" does not exist'.format(k))
if k in cf.get_option('config_paths'):
raise ValueError('Configuration {!r} already exists'.format(k))
kwargs.update(**cf.get_option('config_paths'))
cf.set_option('config_paths', kwargs) | [
"def",
"add_config_paths",
"(",
"*",
"*",
"kwargs",
")",
":",
"for",
"k",
",",
"path",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"raise",
"ValueError",
"(",
"'Configuration file \"{}\" does not exist'",
".",
"format",
"(",
"k",
")",
")",
"if",
"k",
"in",
"cf",
".",
"get_option",
"(",
"'config_paths'",
")",
":",
"raise",
"ValueError",
"(",
"'Configuration {!r} already exists'",
".",
"format",
"(",
"k",
")",
")",
"kwargs",
".",
"update",
"(",
"*",
"*",
"cf",
".",
"get_option",
"(",
"'config_paths'",
")",
")",
"cf",
".",
"set_option",
"(",
"'config_paths'",
",",
"kwargs",
")"
] | Add to the pool of available configuration files for BIDSLayout.
Args:
kwargs: dictionary specifying where to find additional config files.
Keys are names, values are paths to the corresponding .json file.
Example:
> add_config_paths(my_config='/path/to/config')
> layout = BIDSLayout('/path/to/bids', config=['bids', 'my_config']) | [
"Add",
"to",
"the",
"pool",
"of",
"available",
"configuration",
"files",
"for",
"BIDSLayout",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/layout.py#L77-L97 | train |
bids-standard/pybids | bids/layout/layout.py | BIDSLayout.add_derivatives | def add_derivatives(self, path, **kwargs):
''' Add BIDS-Derivatives datasets to tracking.
Args:
path (str, list): One or more paths to BIDS-Derivatives datasets.
Each path can point to either a derivatives/ directory
containing one or more pipeline directories, or to a single
pipeline directory (e.g., derivatives/fmriprep).
kwargs (dict): Optional keyword arguments to pass on to
BIDSLayout() when initializing each of the derivative datasets.
Note: Every derivatives directory intended for indexing MUST contain a
valid dataset_description.json file. See the BIDS-Derivatives
specification for details.
'''
paths = listify(path)
deriv_dirs = []
# Collect all paths that contain a dataset_description.json
def check_for_description(dir):
dd = os.path.join(dir, 'dataset_description.json')
return os.path.exists(dd)
for p in paths:
p = os.path.abspath(p)
if os.path.exists(p):
if check_for_description(p):
deriv_dirs.append(p)
else:
subdirs = [d for d in os.listdir(p)
if os.path.isdir(os.path.join(p, d))]
for sd in subdirs:
sd = os.path.join(p, sd)
if check_for_description(sd):
deriv_dirs.append(sd)
if not deriv_dirs:
warnings.warn("Derivative indexing was enabled, but no valid "
"derivatives datasets were found in any of the "
"provided or default locations. Please make sure "
"all derivatives datasets you intend to index "
"contain a 'dataset_description.json' file, as "
"described in the BIDS-derivatives specification.")
for deriv in deriv_dirs:
dd = os.path.join(deriv, 'dataset_description.json')
with open(dd, 'r', encoding='utf-8') as ddfd:
description = json.load(ddfd)
pipeline_name = description.get(
'PipelineDescription', {}).get('Name')
if pipeline_name is None:
raise ValueError("Every valid BIDS-derivatives dataset must "
"have a PipelineDescription.Name field set "
"inside dataset_description.json.")
if pipeline_name in self.derivatives:
raise ValueError("Pipeline name '%s' has already been added "
"to this BIDSLayout. Every added pipeline "
"must have a unique name!")
# Default config and sources values
kwargs['config'] = kwargs.get('config') or ['bids', 'derivatives']
kwargs['sources'] = kwargs.get('sources') or self
self.derivatives[pipeline_name] = BIDSLayout(deriv, **kwargs)
# Consolidate all entities post-indexing. Note: no conflicts occur b/c
# multiple entries with the same name all point to the same instance.
for deriv in self.derivatives.values():
self.entities.update(deriv.entities) | python | def add_derivatives(self, path, **kwargs):
''' Add BIDS-Derivatives datasets to tracking.
Args:
path (str, list): One or more paths to BIDS-Derivatives datasets.
Each path can point to either a derivatives/ directory
containing one or more pipeline directories, or to a single
pipeline directory (e.g., derivatives/fmriprep).
kwargs (dict): Optional keyword arguments to pass on to
BIDSLayout() when initializing each of the derivative datasets.
Note: Every derivatives directory intended for indexing MUST contain a
valid dataset_description.json file. See the BIDS-Derivatives
specification for details.
'''
paths = listify(path)
deriv_dirs = []
# Collect all paths that contain a dataset_description.json
def check_for_description(dir):
dd = os.path.join(dir, 'dataset_description.json')
return os.path.exists(dd)
for p in paths:
p = os.path.abspath(p)
if os.path.exists(p):
if check_for_description(p):
deriv_dirs.append(p)
else:
subdirs = [d for d in os.listdir(p)
if os.path.isdir(os.path.join(p, d))]
for sd in subdirs:
sd = os.path.join(p, sd)
if check_for_description(sd):
deriv_dirs.append(sd)
if not deriv_dirs:
warnings.warn("Derivative indexing was enabled, but no valid "
"derivatives datasets were found in any of the "
"provided or default locations. Please make sure "
"all derivatives datasets you intend to index "
"contain a 'dataset_description.json' file, as "
"described in the BIDS-derivatives specification.")
for deriv in deriv_dirs:
dd = os.path.join(deriv, 'dataset_description.json')
with open(dd, 'r', encoding='utf-8') as ddfd:
description = json.load(ddfd)
pipeline_name = description.get(
'PipelineDescription', {}).get('Name')
if pipeline_name is None:
raise ValueError("Every valid BIDS-derivatives dataset must "
"have a PipelineDescription.Name field set "
"inside dataset_description.json.")
if pipeline_name in self.derivatives:
raise ValueError("Pipeline name '%s' has already been added "
"to this BIDSLayout. Every added pipeline "
"must have a unique name!")
# Default config and sources values
kwargs['config'] = kwargs.get('config') or ['bids', 'derivatives']
kwargs['sources'] = kwargs.get('sources') or self
self.derivatives[pipeline_name] = BIDSLayout(deriv, **kwargs)
# Consolidate all entities post-indexing. Note: no conflicts occur b/c
# multiple entries with the same name all point to the same instance.
for deriv in self.derivatives.values():
self.entities.update(deriv.entities) | [
"def",
"add_derivatives",
"(",
"self",
",",
"path",
",",
"*",
"*",
"kwargs",
")",
":",
"paths",
"=",
"listify",
"(",
"path",
")",
"deriv_dirs",
"=",
"[",
"]",
"# Collect all paths that contain a dataset_description.json",
"def",
"check_for_description",
"(",
"dir",
")",
":",
"dd",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"'dataset_description.json'",
")",
"return",
"os",
".",
"path",
".",
"exists",
"(",
"dd",
")",
"for",
"p",
"in",
"paths",
":",
"p",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"p",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"p",
")",
":",
"if",
"check_for_description",
"(",
"p",
")",
":",
"deriv_dirs",
".",
"append",
"(",
"p",
")",
"else",
":",
"subdirs",
"=",
"[",
"d",
"for",
"d",
"in",
"os",
".",
"listdir",
"(",
"p",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"p",
",",
"d",
")",
")",
"]",
"for",
"sd",
"in",
"subdirs",
":",
"sd",
"=",
"os",
".",
"path",
".",
"join",
"(",
"p",
",",
"sd",
")",
"if",
"check_for_description",
"(",
"sd",
")",
":",
"deriv_dirs",
".",
"append",
"(",
"sd",
")",
"if",
"not",
"deriv_dirs",
":",
"warnings",
".",
"warn",
"(",
"\"Derivative indexing was enabled, but no valid \"",
"\"derivatives datasets were found in any of the \"",
"\"provided or default locations. Please make sure \"",
"\"all derivatives datasets you intend to index \"",
"\"contain a 'dataset_description.json' file, as \"",
"\"described in the BIDS-derivatives specification.\"",
")",
"for",
"deriv",
"in",
"deriv_dirs",
":",
"dd",
"=",
"os",
".",
"path",
".",
"join",
"(",
"deriv",
",",
"'dataset_description.json'",
")",
"with",
"open",
"(",
"dd",
",",
"'r'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"ddfd",
":",
"description",
"=",
"json",
".",
"load",
"(",
"ddfd",
")",
"pipeline_name",
"=",
"description",
".",
"get",
"(",
"'PipelineDescription'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'Name'",
")",
"if",
"pipeline_name",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Every valid BIDS-derivatives dataset must \"",
"\"have a PipelineDescription.Name field set \"",
"\"inside dataset_description.json.\"",
")",
"if",
"pipeline_name",
"in",
"self",
".",
"derivatives",
":",
"raise",
"ValueError",
"(",
"\"Pipeline name '%s' has already been added \"",
"\"to this BIDSLayout. Every added pipeline \"",
"\"must have a unique name!\"",
")",
"# Default config and sources values",
"kwargs",
"[",
"'config'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'config'",
")",
"or",
"[",
"'bids'",
",",
"'derivatives'",
"]",
"kwargs",
"[",
"'sources'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'sources'",
")",
"or",
"self",
"self",
".",
"derivatives",
"[",
"pipeline_name",
"]",
"=",
"BIDSLayout",
"(",
"deriv",
",",
"*",
"*",
"kwargs",
")",
"# Consolidate all entities post-indexing. Note: no conflicts occur b/c",
"# multiple entries with the same name all point to the same instance.",
"for",
"deriv",
"in",
"self",
".",
"derivatives",
".",
"values",
"(",
")",
":",
"self",
".",
"entities",
".",
"update",
"(",
"deriv",
".",
"entities",
")"
] | Add BIDS-Derivatives datasets to tracking.
Args:
path (str, list): One or more paths to BIDS-Derivatives datasets.
Each path can point to either a derivatives/ directory
containing one or more pipeline directories, or to a single
pipeline directory (e.g., derivatives/fmriprep).
kwargs (dict): Optional keyword arguments to pass on to
BIDSLayout() when initializing each of the derivative datasets.
Note: Every derivatives directory intended for indexing MUST contain a
valid dataset_description.json file. See the BIDS-Derivatives
specification for details. | [
"Add",
"BIDS",
"-",
"Derivatives",
"datasets",
"to",
"tracking",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/layout.py#L352-L418 | train |
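A usage sketch for add_derivatives; the paths are placeholders, and each derivatives folder must carry a dataset_description.json with PipelineDescription.Name set, as the docstring above requires.

from bids import BIDSLayout

layout = BIDSLayout('/data/ds000117')  # hypothetical raw dataset

# Point at a whole derivatives/ tree or at a single pipeline directory.
layout.add_derivatives('/data/ds000117/derivatives/fmriprep')

# Indexed pipelines are keyed by PipelineDescription.Name.
print(list(layout.derivatives.keys()))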
bids-standard/pybids | bids/layout/layout.py | BIDSLayout.get_file | def get_file(self, filename, scope='all'):
''' Returns the BIDSFile object with the specified path.
Args:
filename (str): The path of the file to retrieve. Must be either
an absolute path, or relative to the root of this BIDSLayout.
scope (str, list): Scope of the search space. If passed, only
BIDSLayouts that match the specified scope will be
searched. See BIDSLayout docstring for valid values.
Returns: A BIDSFile, or None if no match was found.
'''
filename = os.path.abspath(os.path.join(self.root, filename))
layouts = self._get_layouts_in_scope(scope)
for ly in layouts:
if filename in ly.files:
return ly.files[filename]
return None | python | def get_file(self, filename, scope='all'):
''' Returns the BIDSFile object with the specified path.
Args:
filename (str): The path of the file to retrieve. Must be either
an absolute path, or relative to the root of this BIDSLayout.
scope (str, list): Scope of the search space. If passed, only
BIDSLayouts that match the specified scope will be
searched. See BIDSLayout docstring for valid values.
Returns: A BIDSFile, or None if no match was found.
'''
filename = os.path.abspath(os.path.join(self.root, filename))
layouts = self._get_layouts_in_scope(scope)
for ly in layouts:
if filename in ly.files:
return ly.files[filename]
return None | [
"def",
"get_file",
"(",
"self",
",",
"filename",
",",
"scope",
"=",
"'all'",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"root",
",",
"filename",
")",
")",
"layouts",
"=",
"self",
".",
"_get_layouts_in_scope",
"(",
"scope",
")",
"for",
"ly",
"in",
"layouts",
":",
"if",
"filename",
"in",
"ly",
".",
"files",
":",
"return",
"ly",
".",
"files",
"[",
"filename",
"]",
"return",
"None"
] | Returns the BIDSFile object with the specified path.
Args:
filename (str): The path of the file to retrieve. Must be either
an absolute path, or relative to the root of this BIDSLayout.
scope (str, list): Scope of the search space. If passed, only
BIDSLayouts that match the specified scope will be
searched. See BIDSLayout docstring for valid values.
Returns: A BIDSFile, or None if no match was found. | [
"Returns",
"the",
"BIDSFile",
"object",
"with",
"the",
"specified",
"path",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/layout.py#L600-L617 | train |
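A short sketch of get_file; the dataset root and file path are hypothetical, and relative paths are resolved against the layout root as described above.

from bids import BIDSLayout

layout = BIDSLayout('/data/ds000117')  # hypothetical dataset root

bids_file = layout.get_file('sub-01/func/sub-01_task-rest_bold.nii.gz')
if bids_file is not None:
    print(bids_file.path, bids_file.entities.get('subject'))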
bids-standard/pybids | bids/layout/layout.py | BIDSLayout.get_collections | def get_collections(self, level, types=None, variables=None, merge=False,
sampling_rate=None, skip_empty=False, **kwargs):
"""Return one or more variable Collections in the BIDS project.
Args:
level (str): The level of analysis to return variables for. Must be
one of 'run', 'session', 'subject', or 'dataset'.
types (str, list): Types of variables to retrieve. All valid values
reflect the filename stipulated in the BIDS spec for each kind of
variable. Valid values include: 'events', 'physio', 'stim',
'scans', 'participants', 'sessions', and 'regressors'.
variables (list): Optional list of variable names to return. If
None, all available variables are returned.
merge (bool): If True, variables are merged across all observations
of the current level. E.g., if level='subject', variables from
all subjects will be merged into a single collection. If False,
each observation is handled separately, and the result is
returned as a list.
sampling_rate (int, str): If level='run', the sampling rate to
pass onto the returned BIDSRunVariableCollection.
skip_empty (bool): Whether or not to skip empty Variables (i.e.,
where there are no rows/records in a file after applying any
filtering operations like dropping NaNs).
kwargs: Optional additional arguments to pass onto load_variables.
"""
from bids.variables import load_variables
index = load_variables(self, types=types, levels=level,
skip_empty=skip_empty, **kwargs)
return index.get_collections(level, variables, merge,
sampling_rate=sampling_rate) | python | def get_collections(self, level, types=None, variables=None, merge=False,
sampling_rate=None, skip_empty=False, **kwargs):
"""Return one or more variable Collections in the BIDS project.
Args:
level (str): The level of analysis to return variables for. Must be
one of 'run', 'session', 'subject', or 'dataset'.
types (str, list): Types of variables to retrieve. All valid values
reflect the filename stipulated in the BIDS spec for each kind of
variable. Valid values include: 'events', 'physio', 'stim',
'scans', 'participants', 'sessions', and 'regressors'.
variables (list): Optional list of variable names to return. If
None, all available variables are returned.
merge (bool): If True, variables are merged across all observations
of the current level. E.g., if level='subject', variables from
all subjects will be merged into a single collection. If False,
each observation is handled separately, and the result is
returned as a list.
sampling_rate (int, str): If level='run', the sampling rate to
pass onto the returned BIDSRunVariableCollection.
skip_empty (bool): Whether or not to skip empty Variables (i.e.,
where there are no rows/records in a file after applying any
filtering operations like dropping NaNs).
kwargs: Optional additional arguments to pass onto load_variables.
"""
from bids.variables import load_variables
index = load_variables(self, types=types, levels=level,
skip_empty=skip_empty, **kwargs)
return index.get_collections(level, variables, merge,
sampling_rate=sampling_rate) | [
"def",
"get_collections",
"(",
"self",
",",
"level",
",",
"types",
"=",
"None",
",",
"variables",
"=",
"None",
",",
"merge",
"=",
"False",
",",
"sampling_rate",
"=",
"None",
",",
"skip_empty",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"bids",
".",
"variables",
"import",
"load_variables",
"index",
"=",
"load_variables",
"(",
"self",
",",
"types",
"=",
"types",
",",
"levels",
"=",
"level",
",",
"skip_empty",
"=",
"skip_empty",
",",
"*",
"*",
"kwargs",
")",
"return",
"index",
".",
"get_collections",
"(",
"level",
",",
"variables",
",",
"merge",
",",
"sampling_rate",
"=",
"sampling_rate",
")"
] | Return one or more variable Collections in the BIDS project.
Args:
level (str): The level of analysis to return variables for. Must be
one of 'run', 'session', 'subject', or 'dataset'.
types (str, list): Types of variables to retrieve. All valid values
reflect the filename stipulated in the BIDS spec for each kind of
variable. Valid values include: 'events', 'physio', 'stim',
'scans', 'participants', 'sessions', and 'regressors'.
variables (list): Optional list of variable names to return. If
None, all available variables are returned.
merge (bool): If True, variables are merged across all observations
of the current level. E.g., if level='subject', variables from
all subjects will be merged into a single collection. If False,
each observation is handled separately, and the result is
returned as a list.
sampling_rate (int, str): If level='run', the sampling rate to
pass onto the returned BIDSRunVariableCollection.
skip_empty (bool): Whether or not to skip empty Variables (i.e.,
where there are no rows/records in a file after applying any
filtering operations like dropping NaNs).
kwargs: Optional additional arguments to pass onto load_variables. | [
"Return",
"one",
"or",
"more",
"variable",
"Collections",
"in",
"the",
"BIDS",
"project",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/layout.py#L619-L648 | train |
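A sketch of get_collections at the run and subject levels; the dataset root, task name, and sampling rate are illustrative, and entity filters such as task are assumed to pass through to load_variables as the docstring indicates.

from bids import BIDSLayout

layout = BIDSLayout('/data/ds000117')  # hypothetical dataset root

# One BIDSRunVariableCollection per run for the 'rest' task, resampled to 10 Hz.
run_colls = layout.get_collections('run', types=['events'], task='rest',
                                   sampling_rate=10)
for coll in run_colls:
    print(coll.entities, list(coll.variables.keys()))

# Or merge all subject-level variables into a single collection.
merged = layout.get_collections('subject', merge=True)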
bids-standard/pybids | bids/layout/layout.py | BIDSLayout.get_metadata | def get_metadata(self, path, include_entities=False, **kwargs):
"""Return metadata found in JSON sidecars for the specified file.
Args:
path (str): Path to the file to get metadata for.
include_entities (bool): If True, all available entities extracted
from the filename (rather than JSON sidecars) are included in
the returned metadata dictionary.
kwargs (dict): Optional keyword arguments to pass onto
get_nearest().
Returns: A dictionary of key/value pairs extracted from all of the
target file's associated JSON sidecars.
Notes:
A dictionary containing metadata extracted from all matching .json
files is returned. In cases where the same key is found in multiple
files, the values in files closer to the input filename will take
precedence, per the inheritance rules in the BIDS specification.
"""
f = self.get_file(path)
# For querying efficiency, store metadata in the MetadataIndex cache
self.metadata_index.index_file(f.path)
if include_entities:
entities = f.entities
results = entities
else:
results = {}
results.update(self.metadata_index.file_index[path])
return results | python | def get_metadata(self, path, include_entities=False, **kwargs):
"""Return metadata found in JSON sidecars for the specified file.
Args:
path (str): Path to the file to get metadata for.
include_entities (bool): If True, all available entities extracted
from the filename (rather than JSON sidecars) are included in
the returned metadata dictionary.
kwargs (dict): Optional keyword arguments to pass onto
get_nearest().
Returns: A dictionary of key/value pairs extracted from all of the
target file's associated JSON sidecars.
Notes:
A dictionary containing metadata extracted from all matching .json
files is returned. In cases where the same key is found in multiple
files, the values in files closer to the input filename will take
precedence, per the inheritance rules in the BIDS specification.
"""
f = self.get_file(path)
# For querying efficiency, store metadata in the MetadataIndex cache
self.metadata_index.index_file(f.path)
if include_entities:
entities = f.entities
results = entities
else:
results = {}
results.update(self.metadata_index.file_index[path])
return results | [
"def",
"get_metadata",
"(",
"self",
",",
"path",
",",
"include_entities",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"f",
"=",
"self",
".",
"get_file",
"(",
"path",
")",
"# For querying efficiency, store metadata in the MetadataIndex cache",
"self",
".",
"metadata_index",
".",
"index_file",
"(",
"f",
".",
"path",
")",
"if",
"include_entities",
":",
"entities",
"=",
"f",
".",
"entities",
"results",
"=",
"entities",
"else",
":",
"results",
"=",
"{",
"}",
"results",
".",
"update",
"(",
"self",
".",
"metadata_index",
".",
"file_index",
"[",
"path",
"]",
")",
"return",
"results"
] | Return metadata found in JSON sidecars for the specified file.
Args:
path (str): Path to the file to get metadata for.
include_entities (bool): If True, all available entities extracted
from the filename (rather than JSON sidecars) are included in
the returned metadata dictionary.
kwargs (dict): Optional keyword arguments to pass onto
get_nearest().
Returns: A dictionary of key/value pairs extracted from all of the
target file's associated JSON sidecars.
Notes:
A dictionary containing metadata extracted from all matching .json
files is returned. In cases where the same key is found in multiple
files, the values in files closer to the input filename will take
precedence, per the inheritance rules in the BIDS specification. | [
"Return",
"metadata",
"found",
"in",
"JSON",
"sidecars",
"for",
"the",
"specified",
"file",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/layout.py#L650-L684 | train |
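A sketch of get_metadata; the file path is hypothetical, and include_entities=True folds filename entities into the returned dictionary alongside the sidecar fields, per the docstring above.

from bids import BIDSLayout

layout = BIDSLayout('/data/ds000117')  # hypothetical dataset root

md = layout.get_metadata('sub-01/func/sub-01_task-rest_bold.nii.gz',
                         include_entities=True)
print(md.get('RepetitionTime'), md.get('task'))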
bids-standard/pybids | bids/layout/layout.py | BIDSLayout.get_bval | def get_bval(self, path, **kwargs):
""" Get bval file for passed path. """
result = self.get_nearest(path, extensions='bval', suffix='dwi',
all_=True, **kwargs)
return listify(result)[0] | python | def get_bval(self, path, **kwargs):
""" Get bval file for passed path. """
result = self.get_nearest(path, extensions='bval', suffix='dwi',
all_=True, **kwargs)
return listify(result)[0] | [
"def",
"get_bval",
"(",
"self",
",",
"path",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"self",
".",
"get_nearest",
"(",
"path",
",",
"extensions",
"=",
"'bval'",
",",
"suffix",
"=",
"'dwi'",
",",
"all_",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"return",
"listify",
"(",
"result",
")",
"[",
"0",
"]"
] | Get bval file for passed path. | [
"Get",
"bval",
"file",
"for",
"passed",
"path",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/layout.py#L794-L798 | train |
bids-standard/pybids | bids/layout/layout.py | BIDSLayout.copy_files | def copy_files(self, files=None, path_patterns=None, symbolic_links=True,
root=None, conflicts='fail', **kwargs):
"""
Copies one or more BIDSFiles to new locations defined by each
BIDSFile's entities and the specified path_patterns.
Args:
files (list): Optional list of BIDSFile objects to write out. If
none provided, use files from running a get() query using
remaining **kwargs.
path_patterns (str, list): Write patterns to pass to each file's
write_file method.
symbolic_links (bool): Whether to copy each file as a symbolic link
or a deep copy.
root (str): Optional root directory that all patterns are relative
to. Defaults to current working directory.
conflicts (str): Defines the desired action when the output path
already exists. Must be one of:
'fail': raises an exception
'skip': does nothing
'overwrite': overwrites the existing file
'append': adds a suffix to each file copy, starting with 1
kwargs (kwargs): Optional keyword arguments to pass into a get()
query.
"""
_files = self.get(return_type='objects', **kwargs)
if files:
_files = list(set(files).intersection(_files))
for f in _files:
f.copy(path_patterns, symbolic_link=symbolic_links,
root=self.root, conflicts=conflicts) | python | def copy_files(self, files=None, path_patterns=None, symbolic_links=True,
root=None, conflicts='fail', **kwargs):
"""
Copies one or more BIDSFiles to new locations defined by each
BIDSFile's entities and the specified path_patterns.
Args:
files (list): Optional list of BIDSFile objects to write out. If
none provided, use files from running a get() query using
remaining **kwargs.
path_patterns (str, list): Write patterns to pass to each file's
write_file method.
symbolic_links (bool): Whether to copy each file as a symbolic link
or a deep copy.
root (str): Optional root directory that all patterns are relative
to. Defaults to current working directory.
conflicts (str): Defines the desired action when the output path
already exists. Must be one of:
'fail': raises an exception
'skip': does nothing
'overwrite': overwrites the existing file
'append': adds a suffix to each file copy, starting with 1
kwargs (kwargs): Optional keyword arguments to pass into a get()
query.
"""
_files = self.get(return_type='objects', **kwargs)
if files:
_files = list(set(files).intersection(_files))
for f in _files:
f.copy(path_patterns, symbolic_link=symbolic_links,
root=self.root, conflicts=conflicts) | [
"def",
"copy_files",
"(",
"self",
",",
"files",
"=",
"None",
",",
"path_patterns",
"=",
"None",
",",
"symbolic_links",
"=",
"True",
",",
"root",
"=",
"None",
",",
"conflicts",
"=",
"'fail'",
",",
"*",
"*",
"kwargs",
")",
":",
"_files",
"=",
"self",
".",
"get",
"(",
"return_type",
"=",
"'objects'",
",",
"*",
"*",
"kwargs",
")",
"if",
"files",
":",
"_files",
"=",
"list",
"(",
"set",
"(",
"files",
")",
".",
"intersection",
"(",
"_files",
")",
")",
"for",
"f",
"in",
"_files",
":",
"f",
".",
"copy",
"(",
"path_patterns",
",",
"symbolic_link",
"=",
"symbolic_links",
",",
"root",
"=",
"self",
".",
"root",
",",
"conflicts",
"=",
"conflicts",
")"
] | Copies one or more BIDSFiles to new locations defined by each
BIDSFile's entities and the specified path_patterns.
Args:
files (list): Optional list of BIDSFile objects to write out. If
none provided, use files from running a get() query using
remaining **kwargs.
path_patterns (str, list): Write patterns to pass to each file's
write_file method.
symbolic_links (bool): Whether to copy each file as a symbolic link
or a deep copy.
root (str): Optional root directory that all patterns are relative
to. Defaults to current working directory.
conflicts (str): Defines the desired action when the output path
already exists. Must be one of:
'fail': raises an exception
'skip': does nothing
'overwrite': overwrites the existing file
'append': adds a suffix to each file copy, starting with 1
kwargs (kwargs): Optional keyword arguments to pass into a get()
query. | [
"Copies",
"one",
"or",
"more",
"BIDSFiles",
"to",
"new",
"locations",
"defined",
"by",
"each",
"BIDSFile",
"s",
"entities",
"and",
"the",
"specified",
"path_patterns",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/layout.py#L950-L981 | train |
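A sketch of copy_files with a made-up path pattern; brace-enclosed entity names in the pattern are filled from each matching file, and the filter keywords are passed through to get() as described above.

from bids import BIDSLayout

layout = BIDSLayout('/data/ds000117')  # hypothetical dataset root

pattern = 'derivatives/renamed/sub-{subject}/sub-{subject}_task-{task}_bold.nii.gz'
layout.copy_files(path_patterns=[pattern], symbolic_links=False,
                  conflicts='overwrite', subject='01', suffix='bold')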
bids-standard/pybids | bids/layout/layout.py | MetadataIndex.index_file | def index_file(self, f, overwrite=False):
"""Index metadata for the specified file.
Args:
f (BIDSFile, str): A BIDSFile or path to an indexed file.
overwrite (bool): If True, forces reindexing of the file even if
an entry already exists.
"""
if isinstance(f, six.string_types):
f = self.layout.get_file(f)
if f.path in self.file_index and not overwrite:
return
if 'suffix' not in f.entities: # Skip files without suffixes
return
md = self._get_metadata(f.path)
for md_key, md_val in md.items():
if md_key not in self.key_index:
self.key_index[md_key] = {}
self.key_index[md_key][f.path] = md_val
self.file_index[f.path][md_key] = md_val | python | def index_file(self, f, overwrite=False):
"""Index metadata for the specified file.
Args:
f (BIDSFile, str): A BIDSFile or path to an indexed file.
overwrite (bool): If True, forces reindexing of the file even if
an entry already exists.
"""
if isinstance(f, six.string_types):
f = self.layout.get_file(f)
if f.path in self.file_index and not overwrite:
return
if 'suffix' not in f.entities: # Skip files without suffixes
return
md = self._get_metadata(f.path)
for md_key, md_val in md.items():
if md_key not in self.key_index:
self.key_index[md_key] = {}
self.key_index[md_key][f.path] = md_val
self.file_index[f.path][md_key] = md_val | [
"def",
"index_file",
"(",
"self",
",",
"f",
",",
"overwrite",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"f",
",",
"six",
".",
"string_types",
")",
":",
"f",
"=",
"self",
".",
"layout",
".",
"get_file",
"(",
"f",
")",
"if",
"f",
".",
"path",
"in",
"self",
".",
"file_index",
"and",
"not",
"overwrite",
":",
"return",
"if",
"'suffix'",
"not",
"in",
"f",
".",
"entities",
":",
"# Skip files without suffixes",
"return",
"md",
"=",
"self",
".",
"_get_metadata",
"(",
"f",
".",
"path",
")",
"for",
"md_key",
",",
"md_val",
"in",
"md",
".",
"items",
"(",
")",
":",
"if",
"md_key",
"not",
"in",
"self",
".",
"key_index",
":",
"self",
".",
"key_index",
"[",
"md_key",
"]",
"=",
"{",
"}",
"self",
".",
"key_index",
"[",
"md_key",
"]",
"[",
"f",
".",
"path",
"]",
"=",
"md_val",
"self",
".",
"file_index",
"[",
"f",
".",
"path",
"]",
"[",
"md_key",
"]",
"=",
"md_val"
] | Index metadata for the specified file.
Args:
f (BIDSFile, str): A BIDSFile or path to an indexed file.
overwrite (bool): If True, forces reindexing of the file even if
an entry already exists. | [
"Index",
"metadata",
"for",
"the",
"specified",
"file",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/layout.py#L1036-L1059 | train |
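index_file is mostly called internally (get_metadata indexes files on demand); a direct sketch follows, with hypothetical paths. The metadata cache is keyed by absolute path, so the lookup goes through the BIDSFile's path attribute.

from bids import BIDSLayout

layout = BIDSLayout('/data/ds000117')  # hypothetical dataset root

target = 'sub-01/func/sub-01_task-rest_bold.nii.gz'
layout.metadata_index.index_file(target)

f = layout.get_file(target)
print(layout.metadata_index.file_index[f.path])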
bids-standard/pybids | bids/layout/layout.py | MetadataIndex.search | def search(self, files=None, defined_fields=None, **kwargs):
"""Search files in the layout by metadata fields.
Args:
files (list): Optional list of names of files to search. If None,
all files in the layout are scanned.
defined_fields (list): Optional list of names of fields that must
be defined in the JSON sidecar in order to consider the file a
match, but which don't need to match any particular value.
kwargs: Optional keyword arguments defining search constraints;
keys are names of metadata fields, and values are the values
to match those fields against. For example, SliceTiming=0.017 would
return all files that have a SliceTiming value of 0.017 in their
metadata.
Returns: A list of filenames that match all constraints.
"""
if defined_fields is None:
defined_fields = []
all_keys = set(defined_fields) | set(kwargs.keys())
if not all_keys:
raise ValueError("At least one field to search on must be passed.")
# If no list of files is passed, use all files in layout
if files is None:
files = set(self.layout.files.keys())
# Index metadata for any previously unseen files
for f in files:
self.index_file(f)
# Get file intersection of all kwargs keys--this is fast
filesets = [set(self.key_index.get(k, [])) for k in all_keys]
matches = reduce(lambda x, y: x & y, filesets)
if files is not None:
matches &= set(files)
if not matches:
return []
def check_matches(f, key, val):
if isinstance(val, six.string_types) and '*' in val:
val = ('^%s$' % val).replace('*', ".*")
return re.search(str(self.file_index[f][key]), val) is not None
else:
return val == self.file_index[f][key]
# Serially check matches against each pattern, with early termination
for k, val in kwargs.items():
matches = list(filter(lambda x: check_matches(x, k, val), matches))
if not matches:
return []
return matches | python | def search(self, files=None, defined_fields=None, **kwargs):
"""Search files in the layout by metadata fields.
Args:
files (list): Optional list of names of files to search. If None,
all files in the layout are scanned.
defined_fields (list): Optional list of names of fields that must
be defined in the JSON sidecar in order to consider the file a
match, but which don't need to match any particular value.
kwargs: Optional keyword arguments defining search constraints;
keys are names of metadata fields, and values are the values
to match those fields against. For example, SliceTiming=0.017 would
return all files that have a SliceTiming value of 0.017 in their
metadata.
Returns: A list of filenames that match all constraints.
"""
if defined_fields is None:
defined_fields = []
all_keys = set(defined_fields) | set(kwargs.keys())
if not all_keys:
raise ValueError("At least one field to search on must be passed.")
# If no list of files is passed, use all files in layout
if files is None:
files = set(self.layout.files.keys())
# Index metadata for any previously unseen files
for f in files:
self.index_file(f)
# Get file intersection of all kwargs keys--this is fast
filesets = [set(self.key_index.get(k, [])) for k in all_keys]
matches = reduce(lambda x, y: x & y, filesets)
if files is not None:
matches &= set(files)
if not matches:
return []
def check_matches(f, key, val):
if isinstance(val, six.string_types) and '*' in val:
val = ('^%s$' % val).replace('*', ".*")
return re.search(str(self.file_index[f][key]), val) is not None
else:
return val == self.file_index[f][key]
# Serially check matches against each pattern, with early termination
for k, val in kwargs.items():
matches = list(filter(lambda x: check_matches(x, k, val), matches))
if not matches:
return []
return matches | [
"def",
"search",
"(",
"self",
",",
"files",
"=",
"None",
",",
"defined_fields",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"defined_fields",
"is",
"None",
":",
"defined_fields",
"=",
"[",
"]",
"all_keys",
"=",
"set",
"(",
"defined_fields",
")",
"|",
"set",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
"if",
"not",
"all_keys",
":",
"raise",
"ValueError",
"(",
"\"At least one field to search on must be passed.\"",
")",
"# If no list of files is passed, use all files in layout",
"if",
"files",
"is",
"None",
":",
"files",
"=",
"set",
"(",
"self",
".",
"layout",
".",
"files",
".",
"keys",
"(",
")",
")",
"# Index metadata for any previously unseen files",
"for",
"f",
"in",
"files",
":",
"self",
".",
"index_file",
"(",
"f",
")",
"# Get file intersection of all kwargs keys--this is fast",
"filesets",
"=",
"[",
"set",
"(",
"self",
".",
"key_index",
".",
"get",
"(",
"k",
",",
"[",
"]",
")",
")",
"for",
"k",
"in",
"all_keys",
"]",
"matches",
"=",
"reduce",
"(",
"lambda",
"x",
",",
"y",
":",
"x",
"&",
"y",
",",
"filesets",
")",
"if",
"files",
"is",
"not",
"None",
":",
"matches",
"&=",
"set",
"(",
"files",
")",
"if",
"not",
"matches",
":",
"return",
"[",
"]",
"def",
"check_matches",
"(",
"f",
",",
"key",
",",
"val",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"six",
".",
"string_types",
")",
"and",
"'*'",
"in",
"val",
":",
"val",
"=",
"(",
"'^%s$'",
"%",
"val",
")",
".",
"replace",
"(",
"'*'",
",",
"\".*\"",
")",
"return",
"re",
".",
"search",
"(",
"str",
"(",
"self",
".",
"file_index",
"[",
"f",
"]",
"[",
"key",
"]",
")",
",",
"val",
")",
"is",
"not",
"None",
"else",
":",
"return",
"val",
"==",
"self",
".",
"file_index",
"[",
"f",
"]",
"[",
"key",
"]",
"# Serially check matches against each pattern, with early termination",
"for",
"k",
",",
"val",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"matches",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"x",
":",
"check_matches",
"(",
"x",
",",
"k",
",",
"val",
")",
",",
"matches",
")",
")",
"if",
"not",
"matches",
":",
"return",
"[",
"]",
"return",
"matches"
] | Search files in the layout by metadata fields.
Args:
files (list): Optional list of names of files to search. If None,
all files in the layout are scanned.
defined_fields (list): Optional list of names of fields that must
be defined in the JSON sidecar in order to consider the file a
match, but which don't need to match any particular value.
kwargs: Optional keyword arguments defining search constraints;
keys are names of metadata fields, and values are the values
to match those fields against. For example, SliceTiming=0.017 would
return all files that have a SliceTiming value of 0.017 in their
metadata.
Returns: A list of filenames that match all constraints. | [
"Search",
"files",
"in",
"the",
"layout",
"by",
"metadata",
"fields",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/layout.py#L1080-L1136 | train |
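A sketch of MetadataIndex.search; field names follow BIDS JSON metadata, the numeric value is illustrative, and string values may use '*' as a wildcard per the implementation above.

from bids import BIDSLayout

layout = BIDSLayout('/data/ds000117')  # hypothetical dataset root
idx = layout.metadata_index

# Files whose sidecars define SliceTiming and have RepetitionTime == 2.0.
hits = idx.search(defined_fields=['SliceTiming'], RepetitionTime=2.0)

# Wildcard match on a string-valued field.
epi = idx.search(PulseSequenceType='*EPI*')
print(len(hits), len(epi))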
bids-standard/pybids | bids/analysis/auto_model.py | auto_model | def auto_model(layout, scan_length=None, one_vs_rest=False):
'''Create a simple default model for each of the tasks in a BIDSLayout.
Contrasts each trial type against all other trial types at the run level,
then uses t-tests at each higher level that is present to aggregate these
results.
Args:
layout (BIDSLayout) A BIDSLayout instance
scan_length (Int) Scan length for loading event variables in cases
where the scan length cannot be read from the nifti.
Primarily for testing.
one_vs_rest (Bool) Set to True if you would like to autogenerate
contrasts of each trial type against every other trial type.
Returns:
models (list) list of model dictionaries for each task
'''
base_name = split(layout.root)[-1]
tasks = layout.entities['task'].unique()
task_models = []
for task_name in tasks:
# Populate model meta-data
model = OrderedDict()
model["Name"] = "_".join([base_name, task_name])
model["Description"] = ("Autogenerated model for the %s task from %s" %
(task_name, base_name))
model["Input"] = {"Task": task_name}
steps = []
# Make run level block
transformations = OrderedDict(Name='Factor', Input=['trial_type'])
run = OrderedDict(Level='Run', Name='Run',
Transformations=[transformations])
# Get trial types
run_nodes = load_variables(layout, task=task_name, levels=['run'],
scan_length=scan_length)
evs = []
for n in run_nodes.nodes:
evs.extend(n.variables['trial_type'].values.values)
trial_types = np.unique(evs)
trial_type_factors = ["trial_type." + tt for tt in trial_types]
# Add HRF
run['Transformations'].append(
OrderedDict(Name='Convolve', Input=trial_type_factors))
run_model = OrderedDict(X=trial_type_factors)
run["Model"] = run_model
if one_vs_rest:
# if there are multiple trial types, build contrasts
contrasts = []
for i, tt in enumerate(trial_types):
cdict = OrderedDict()
if len(trial_types) > 1:
cdict["Name"] = "run_" + tt + "_vs_others"
else:
cdict["Name"] = "run_" + tt
cdict["ConditionList"] = trial_type_factors
# Calculate weights for contrast
weights = np.ones(len(trial_types))
try:
weights[trial_types != tt] = -1.0 / (len(trial_types) - 1)
except ZeroDivisionError:
pass
cdict["Weights"] = list(weights)
cdict["Type"] = "t"
contrasts.append(cdict)
run["Contrasts"] = contrasts
steps.append(run)
if one_vs_rest:
# if there are multiple sessions, t-test run level contrasts at
# session level
sessions = layout.get_sessions()
if len(sessions) > 1:
# get contrasts names from previous block
contrast_names = [cc["Name"] for cc in steps[-1]["Contrasts"]]
steps.append(_make_passthrough_contrast("Session",
contrast_names))
subjects = layout.get_subjects()
if len(subjects) > 1:
# get contrasts names from previous block
contrast_names = [cc["Name"] for cc in steps[-1]["Contrasts"]]
steps.append(_make_passthrough_contrast("Subject",
contrast_names))
# get contrasts names from previous block
contrast_names = [cc["Name"] for cc in steps[-1]["Contrasts"]]
steps.append(_make_passthrough_contrast("Dataset",
contrast_names))
model["Steps"] = steps
task_models.append(model)
return task_models | python | def auto_model(layout, scan_length=None, one_vs_rest=False):
'''Create a simple default model for each of the tasks in a BIDSLayout.
Contrasts each trial type against all other trial types at the run level,
then uses t-tests at each higher level that is present to aggregate these
results.
Args:
layout (BIDSLayout) A BIDSLayout instance
scan_length (Int) Scan length for loading event variables in cases
where the scan length cannot be read from the nifti.
Primarily for testing.
one_vs_rest (Bool) Set to True if you would like to autogenerate
contrasts of each trial type against every other trial type.
Returns:
models (list) list of model dictionaries for each task
'''
base_name = split(layout.root)[-1]
tasks = layout.entities['task'].unique()
task_models = []
for task_name in tasks:
# Populate model meta-data
model = OrderedDict()
model["Name"] = "_".join([base_name, task_name])
model["Description"] = ("Autogenerated model for the %s task from %s" %
(task_name, base_name))
model["Input"] = {"Task": task_name}
steps = []
# Make run level block
transformations = OrderedDict(Name='Factor', Input=['trial_type'])
run = OrderedDict(Level='Run', Name='Run',
Transformations=[transformations])
# Get trial types
run_nodes = load_variables(layout, task=task_name, levels=['run'],
scan_length=scan_length)
evs = []
for n in run_nodes.nodes:
evs.extend(n.variables['trial_type'].values.values)
trial_types = np.unique(evs)
trial_type_factors = ["trial_type." + tt for tt in trial_types]
# Add HRF
run['Transformations'].append(
OrderedDict(Name='Convolve', Input=trial_type_factors))
run_model = OrderedDict(X=trial_type_factors)
run["Model"] = run_model
if one_vs_rest:
# if there are multiple trial types, build contrasts
contrasts = []
for i, tt in enumerate(trial_types):
cdict = OrderedDict()
if len(trial_types) > 1:
cdict["Name"] = "run_" + tt + "_vs_others"
else:
cdict["Name"] = "run_" + tt
cdict["ConditionList"] = trial_type_factors
# Calculate weights for contrast
weights = np.ones(len(trial_types))
try:
weights[trial_types != tt] = -1.0 / (len(trial_types) - 1)
except ZeroDivisionError:
pass
cdict["Weights"] = list(weights)
cdict["Type"] = "t"
contrasts.append(cdict)
run["Contrasts"] = contrasts
steps.append(run)
if one_vs_rest:
# if there are multiple sessions, t-test run level contrasts at
# session level
sessions = layout.get_sessions()
if len(sessions) > 1:
# get contrasts names from previous block
contrast_names = [cc["Name"] for cc in steps[-1]["Contrasts"]]
steps.append(_make_passthrough_contrast("Session",
contrast_names))
subjects = layout.get_subjects()
if len(subjects) > 1:
# get contrasts names from previous block
contrast_names = [cc["Name"] for cc in steps[-1]["Contrasts"]]
steps.append(_make_passthrough_contrast("Subject",
contrast_names))
# get contrasts names from previous block
contrast_names = [cc["Name"] for cc in steps[-1]["Contrasts"]]
steps.append(_make_passthrough_contrast("Dataset",
contrast_names))
model["Steps"] = steps
task_models.append(model)
return task_models | [
"def",
"auto_model",
"(",
"layout",
",",
"scan_length",
"=",
"None",
",",
"one_vs_rest",
"=",
"False",
")",
":",
"base_name",
"=",
"split",
"(",
"layout",
".",
"root",
")",
"[",
"-",
"1",
"]",
"tasks",
"=",
"layout",
".",
"entities",
"[",
"'task'",
"]",
".",
"unique",
"(",
")",
"task_models",
"=",
"[",
"]",
"for",
"task_name",
"in",
"tasks",
":",
"# Populate model meta-data",
"model",
"=",
"OrderedDict",
"(",
")",
"model",
"[",
"\"Name\"",
"]",
"=",
"\"_\"",
".",
"join",
"(",
"[",
"base_name",
",",
"task_name",
"]",
")",
"model",
"[",
"\"Description\"",
"]",
"=",
"(",
"\"Autogenerated model for the %s task from %s\"",
"%",
"(",
"task_name",
",",
"base_name",
")",
")",
"model",
"[",
"\"Input\"",
"]",
"=",
"{",
"\"Task\"",
":",
"task_name",
"}",
"steps",
"=",
"[",
"]",
"# Make run level block",
"transformations",
"=",
"OrderedDict",
"(",
"Name",
"=",
"'Factor'",
",",
"Input",
"=",
"[",
"'trial_type'",
"]",
")",
"run",
"=",
"OrderedDict",
"(",
"Level",
"=",
"'Run'",
",",
"Name",
"=",
"'Run'",
",",
"Transformations",
"=",
"[",
"transformations",
"]",
")",
"# Get trial types",
"run_nodes",
"=",
"load_variables",
"(",
"layout",
",",
"task",
"=",
"task_name",
",",
"levels",
"=",
"[",
"'run'",
"]",
",",
"scan_length",
"=",
"scan_length",
")",
"evs",
"=",
"[",
"]",
"for",
"n",
"in",
"run_nodes",
".",
"nodes",
":",
"evs",
".",
"extend",
"(",
"n",
".",
"variables",
"[",
"'trial_type'",
"]",
".",
"values",
".",
"values",
")",
"trial_types",
"=",
"np",
".",
"unique",
"(",
"evs",
")",
"trial_type_factors",
"=",
"[",
"\"trial_type.\"",
"+",
"tt",
"for",
"tt",
"in",
"trial_types",
"]",
"# Add HRF",
"run",
"[",
"'Transformations'",
"]",
".",
"append",
"(",
"OrderedDict",
"(",
"Name",
"=",
"'Convolve'",
",",
"Input",
"=",
"trial_type_factors",
")",
")",
"run_model",
"=",
"OrderedDict",
"(",
"X",
"=",
"trial_type_factors",
")",
"run",
"[",
"\"Model\"",
"]",
"=",
"run_model",
"if",
"one_vs_rest",
":",
"# if there are multiple trial types, build contrasts",
"contrasts",
"=",
"[",
"]",
"for",
"i",
",",
"tt",
"in",
"enumerate",
"(",
"trial_types",
")",
":",
"cdict",
"=",
"OrderedDict",
"(",
")",
"if",
"len",
"(",
"trial_types",
")",
">",
"1",
":",
"cdict",
"[",
"\"Name\"",
"]",
"=",
"\"run_\"",
"+",
"tt",
"+",
"\"_vs_others\"",
"else",
":",
"cdict",
"[",
"\"Name\"",
"]",
"=",
"\"run_\"",
"+",
"tt",
"cdict",
"[",
"\"ConditionList\"",
"]",
"=",
"trial_type_factors",
"# Calculate weights for contrast",
"weights",
"=",
"np",
".",
"ones",
"(",
"len",
"(",
"trial_types",
")",
")",
"try",
":",
"weights",
"[",
"trial_types",
"!=",
"tt",
"]",
"=",
"-",
"1.0",
"/",
"(",
"len",
"(",
"trial_types",
")",
"-",
"1",
")",
"except",
"ZeroDivisionError",
":",
"pass",
"cdict",
"[",
"\"Weights\"",
"]",
"=",
"list",
"(",
"weights",
")",
"cdict",
"[",
"\"Type\"",
"]",
"=",
"\"t\"",
"contrasts",
".",
"append",
"(",
"cdict",
")",
"run",
"[",
"\"Contrasts\"",
"]",
"=",
"contrasts",
"steps",
".",
"append",
"(",
"run",
")",
"if",
"one_vs_rest",
":",
"# if there are multiple sessions, t-test run level contrasts at",
"# session level",
"sessions",
"=",
"layout",
".",
"get_sessions",
"(",
")",
"if",
"len",
"(",
"sessions",
")",
">",
"1",
":",
"# get contrasts names from previous block",
"contrast_names",
"=",
"[",
"cc",
"[",
"\"Name\"",
"]",
"for",
"cc",
"in",
"steps",
"[",
"-",
"1",
"]",
"[",
"\"Contrasts\"",
"]",
"]",
"steps",
".",
"append",
"(",
"_make_passthrough_contrast",
"(",
"\"Session\"",
",",
"contrast_names",
")",
")",
"subjects",
"=",
"layout",
".",
"get_subjects",
"(",
")",
"if",
"len",
"(",
"subjects",
")",
">",
"1",
":",
"# get contrasts names from previous block",
"contrast_names",
"=",
"[",
"cc",
"[",
"\"Name\"",
"]",
"for",
"cc",
"in",
"steps",
"[",
"-",
"1",
"]",
"[",
"\"Contrasts\"",
"]",
"]",
"steps",
".",
"append",
"(",
"_make_passthrough_contrast",
"(",
"\"Subject\"",
",",
"contrast_names",
")",
")",
"# get contrasts names from previous block",
"contrast_names",
"=",
"[",
"cc",
"[",
"\"Name\"",
"]",
"for",
"cc",
"in",
"steps",
"[",
"-",
"1",
"]",
"[",
"\"Contrasts\"",
"]",
"]",
"steps",
".",
"append",
"(",
"_make_passthrough_contrast",
"(",
"\"Dataset\"",
",",
"contrast_names",
")",
")",
"model",
"[",
"\"Steps\"",
"]",
"=",
"steps",
"task_models",
".",
"append",
"(",
"model",
")",
"return",
"task_models"
] | Create a simple default model for each of the tasks in a BIDSLayout.
Contrasts each trial type against all other trial types and trial types
at the run level and then uses t-tests at each other level present to
aggregate these results up.
Args:
layout (BIDSLayout) A BIDSLayout instance
scan_length (Int) Scan length for loading event variables in cases
where the scan length cannot be read from the nifti.
Primarily for testing.
one_vs_rest (Bool) Set to True if you would like to autogenerate
contrasts of each trial type against every other trial type.
Returns:
models (list) list of model dictionaries for each task | [
"Create",
"a",
"simple",
"default",
"model",
"for",
"each",
"of",
"the",
"tasks",
"in",
"a",
"BIDSLayout",
".",
"Contrasts",
"each",
"trial",
"type",
"against",
"all",
"other",
"trial",
"types",
"and",
"trial",
"types",
"at",
"the",
"run",
"level",
"and",
"then",
"uses",
"t",
"-",
"tests",
"at",
"each",
"other",
"level",
"present",
"to",
"aggregate",
"these",
"results",
"up",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/auto_model.py#L19-L122 | train |
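A minimal usage sketch for auto_model (hedged: the dataset root '/data/ds000117' is hypothetical, and the import path simply mirrors the source file bids/analysis/auto_model.py):

    from bids.layout import BIDSLayout
    from bids.analysis.auto_model import auto_model

    layout = BIDSLayout('/data/ds000117')          # hypothetical BIDS dataset root
    models = auto_model(layout, one_vs_rest=True)  # one model dict per task in the layout
    for m in models:
        print(m['Name'], m['Input'])               # e.g. ds000117_motor {'Task': 'motor'}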
bids-standard/pybids | bids/variables/variables.py | SimpleVariable.split | def split(self, grouper):
''' Split the current SparseRunVariable into multiple columns.
Args:
grouper (iterable): list to groupby, where each unique value will
be taken as the name of the resulting column.
Returns:
A list of SparseRunVariables, one per unique value in the
grouper.
'''
data = self.to_df(condition=True, entities=True)
data = data.drop('condition', axis=1)
subsets = []
for i, (name, g) in enumerate(data.groupby(grouper)):
name = '%s.%s' % (self.name, name)
col = self.__class__(name=name, data=g, source=self.source,
run_info=getattr(self, 'run_info', None))
subsets.append(col)
return subsets | python | def split(self, grouper):
''' Split the current SparseRunVariable into multiple columns.
Args:
grouper (iterable): list to groupby, where each unique value will
be taken as the name of the resulting column.
Returns:
A list of SparseRunVariables, one per unique value in the
grouper.
'''
data = self.to_df(condition=True, entities=True)
data = data.drop('condition', axis=1)
subsets = []
for i, (name, g) in enumerate(data.groupby(grouper)):
name = '%s.%s' % (self.name, name)
col = self.__class__(name=name, data=g, source=self.source,
run_info=getattr(self, 'run_info', None))
subsets.append(col)
return subsets | [
"def",
"split",
"(",
"self",
",",
"grouper",
")",
":",
"data",
"=",
"self",
".",
"to_df",
"(",
"condition",
"=",
"True",
",",
"entities",
"=",
"True",
")",
"data",
"=",
"data",
".",
"drop",
"(",
"'condition'",
",",
"axis",
"=",
"1",
")",
"subsets",
"=",
"[",
"]",
"for",
"i",
",",
"(",
"name",
",",
"g",
")",
"in",
"enumerate",
"(",
"data",
".",
"groupby",
"(",
"grouper",
")",
")",
":",
"name",
"=",
"'%s.%s'",
"%",
"(",
"self",
".",
"name",
",",
"name",
")",
"col",
"=",
"self",
".",
"__class__",
"(",
"name",
"=",
"name",
",",
"data",
"=",
"g",
",",
"source",
"=",
"self",
".",
"source",
",",
"run_info",
"=",
"getattr",
"(",
"self",
",",
"'run_info'",
",",
"None",
")",
")",
"subsets",
".",
"append",
"(",
"col",
")",
"return",
"subsets"
] | Split the current SparseRunVariable into multiple columns.
Args:
grouper (iterable): list to groupby, where each unique value will
be taken as the name of the resulting column.
Returns:
A list of SparseRunVariables, one per unique value in the
grouper. | [
"Split",
"the",
"current",
"SparseRunVariable",
"into",
"multiple",
"columns",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/variables.py#L240-L260 | train |
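A small sketch of split(); the variable is constructed by hand with made-up amplitudes and a made-up 'subject' column (mirroring the constructor call used in BIDSVariableCollection.from_df further below), and the subject labels double as the grouper:

    import pandas as pd
    from bids.variables.variables import SimpleVariable

    data = pd.DataFrame({'amplitude': [1.0, 2.0, 3.0, 4.0],
                         'subject': ['01', '01', '02', '02']})    # hypothetical values
    rt = SimpleVariable(name='RT', data=data, source='events')
    subsets = rt.split(data['subject'].tolist())   # -> variables named 'RT.01' and 'RT.02'
    print([v.name for v in subsets])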
bids-standard/pybids | bids/variables/variables.py | SimpleVariable.select_rows | def select_rows(self, rows):
''' Truncate internal arrays to keep only the specified rows.
Args:
rows (array): An integer or boolean array identifying the indices
of rows to keep.
'''
self.values = self.values.iloc[rows]
self.index = self.index.iloc[rows, :]
for prop in self._property_columns:
vals = getattr(self, prop)[rows]
setattr(self, prop, vals) | python | def select_rows(self, rows):
''' Truncate internal arrays to keep only the specified rows.
Args:
rows (array): An integer or boolean array identifying the indices
of rows to keep.
'''
self.values = self.values.iloc[rows]
self.index = self.index.iloc[rows, :]
for prop in self._property_columns:
vals = getattr(self, prop)[rows]
setattr(self, prop, vals) | [
"def",
"select_rows",
"(",
"self",
",",
"rows",
")",
":",
"self",
".",
"values",
"=",
"self",
".",
"values",
".",
"iloc",
"[",
"rows",
"]",
"self",
".",
"index",
"=",
"self",
".",
"index",
".",
"iloc",
"[",
"rows",
",",
":",
"]",
"for",
"prop",
"in",
"self",
".",
"_property_columns",
":",
"vals",
"=",
"getattr",
"(",
"self",
",",
"prop",
")",
"[",
"rows",
"]",
"setattr",
"(",
"self",
",",
"prop",
",",
"vals",
")"
] | Truncate internal arrays to keep only the specified rows.
Args:
rows (array): An integer or boolean array identifying the indices
of rows to keep. | [
"Truncate",
"internal",
"arrays",
"to",
"keep",
"only",
"the",
"specified",
"rows",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/variables.py#L269-L280 | train |
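select_rows() takes integer indices or a boolean mask and truncates the variable in place; continuing the hypothetical 'RT' variable from the previous sketch:

    import numpy as np

    keep = np.array([True, True, False, False])   # keep only the first two observations
    rt.select_rows(keep)                          # modifies rt in place
    print(len(rt.values))                         # 2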
bids-standard/pybids | bids/variables/variables.py | DenseRunVariable.split | def split(self, grouper):
'''Split the current DenseRunVariable into multiple columns.
Parameters
----------
grouper : :obj:`pandas.DataFrame`
Binary DF specifying the design matrix to use for splitting. Number
of rows must match current ``DenseRunVariable``; a new ``DenseRunVariable``
will be generated for each column in the grouper.
Returns
-------
A list of DenseRunVariables, one per unique value in the grouper.
'''
values = grouper.values * self.values.values
df = pd.DataFrame(values, columns=grouper.columns)
return [DenseRunVariable(name='%s.%s' % (self.name, name),
values=df[name].values,
run_info=self.run_info,
source=self.source,
sampling_rate=self.sampling_rate)
for i, name in enumerate(df.columns)] | python | def split(self, grouper):
'''Split the current DenseRunVariable into multiple columns.
Parameters
----------
grouper : :obj:`pandas.DataFrame`
Binary DF specifying the design matrix to use for splitting. Number
of rows must match current ``DenseRunVariable``; a new ``DenseRunVariable``
will be generated for each column in the grouper.
Returns
-------
A list of DenseRunVariables, one per unique value in the grouper.
'''
values = grouper.values * self.values.values
df = pd.DataFrame(values, columns=grouper.columns)
return [DenseRunVariable(name='%s.%s' % (self.name, name),
values=df[name].values,
run_info=self.run_info,
source=self.source,
sampling_rate=self.sampling_rate)
for i, name in enumerate(df.columns)] | [
"def",
"split",
"(",
"self",
",",
"grouper",
")",
":",
"values",
"=",
"grouper",
".",
"values",
"*",
"self",
".",
"values",
".",
"values",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"values",
",",
"columns",
"=",
"grouper",
".",
"columns",
")",
"return",
"[",
"DenseRunVariable",
"(",
"name",
"=",
"'%s.%s'",
"%",
"(",
"self",
".",
"name",
",",
"name",
")",
",",
"values",
"=",
"df",
"[",
"name",
"]",
".",
"values",
",",
"run_info",
"=",
"self",
".",
"run_info",
",",
"source",
"=",
"self",
".",
"source",
",",
"sampling_rate",
"=",
"self",
".",
"sampling_rate",
")",
"for",
"i",
",",
"name",
"in",
"enumerate",
"(",
"df",
".",
"columns",
")",
"]"
] | Split the current DenseRunVariable into multiple columns.
Parameters
----------
grouper : :obj:`pandas.DataFrame`
Binary DF specifying the design matrix to use for splitting. Number
of rows must match current ``DenseRunVariable``; a new ``DenseRunVariable``
will be generated for each column in the grouper.
Returns
-------
A list of DenseRunVariables, one per unique value in the grouper. | [
"Split",
"the",
"current",
"DenseRunVariable",
"into",
"multiple",
"columns",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/variables.py#L393-L414 | train |
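A sketch of the dense split; `dense_var` stands in for an existing DenseRunVariable (e.g. taken from a run-level collection), and the two binary condition columns are invented for illustration:

    import numpy as np
    import pandas as pd

    n = len(dense_var.values)                      # one row per sample
    grouper = pd.DataFrame({'condA': np.tile([1, 0], n // 2 + 1)[:n],
                            'condB': np.tile([0, 1], n // 2 + 1)[:n]})
    parts = dense_var.split(grouper)               # -> [<name>.condA, <name>.condB]
    print([p.name for p in parts])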
bids-standard/pybids | bids/variables/variables.py | DenseRunVariable._build_entity_index | def _build_entity_index(self, run_info, sampling_rate):
''' Build the entity index from run information. '''
index = []
interval = int(round(1000. / sampling_rate))
_timestamps = []
for run in run_info:
reps = int(math.ceil(run.duration * sampling_rate))
ent_vals = list(run.entities.values())
df = pd.DataFrame([ent_vals] * reps, columns=list(run.entities.keys()))
ts = pd.date_range(0, periods=len(df), freq='%sms' % interval)
_timestamps.append(ts.to_series())
index.append(df)
self.timestamps = pd.concat(_timestamps, axis=0, sort=True)
return pd.concat(index, axis=0, sort=True).reset_index(drop=True) | python | def _build_entity_index(self, run_info, sampling_rate):
''' Build the entity index from run information. '''
index = []
interval = int(round(1000. / sampling_rate))
_timestamps = []
for run in run_info:
reps = int(math.ceil(run.duration * sampling_rate))
ent_vals = list(run.entities.values())
df = pd.DataFrame([ent_vals] * reps, columns=list(run.entities.keys()))
ts = pd.date_range(0, periods=len(df), freq='%sms' % interval)
_timestamps.append(ts.to_series())
index.append(df)
self.timestamps = pd.concat(_timestamps, axis=0, sort=True)
return pd.concat(index, axis=0, sort=True).reset_index(drop=True) | [
"def",
"_build_entity_index",
"(",
"self",
",",
"run_info",
",",
"sampling_rate",
")",
":",
"index",
"=",
"[",
"]",
"interval",
"=",
"int",
"(",
"round",
"(",
"1000.",
"/",
"sampling_rate",
")",
")",
"_timestamps",
"=",
"[",
"]",
"for",
"run",
"in",
"run_info",
":",
"reps",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"run",
".",
"duration",
"*",
"sampling_rate",
")",
")",
"ent_vals",
"=",
"list",
"(",
"run",
".",
"entities",
".",
"values",
"(",
")",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"[",
"ent_vals",
"]",
"*",
"reps",
",",
"columns",
"=",
"list",
"(",
"run",
".",
"entities",
".",
"keys",
"(",
")",
")",
")",
"ts",
"=",
"pd",
".",
"date_range",
"(",
"0",
",",
"periods",
"=",
"len",
"(",
"df",
")",
",",
"freq",
"=",
"'%sms'",
"%",
"interval",
")",
"_timestamps",
".",
"append",
"(",
"ts",
".",
"to_series",
"(",
")",
")",
"index",
".",
"append",
"(",
"df",
")",
"self",
".",
"timestamps",
"=",
"pd",
".",
"concat",
"(",
"_timestamps",
",",
"axis",
"=",
"0",
",",
"sort",
"=",
"True",
")",
"return",
"pd",
".",
"concat",
"(",
"index",
",",
"axis",
"=",
"0",
",",
"sort",
"=",
"True",
")",
".",
"reset_index",
"(",
"drop",
"=",
"True",
")"
] | Build the entity index from run information. | [
"Build",
"the",
"entity",
"index",
"from",
"run",
"information",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/variables.py#L416-L430 | train |
bids-standard/pybids | bids/variables/variables.py | DenseRunVariable.resample | def resample(self, sampling_rate, inplace=False, kind='linear'):
'''Resample the Variable to the specified sampling rate.
Parameters
----------
sampling_rate : :obj:`int`, :obj:`float`
Target sampling rate (in Hz).
inplace : :obj:`bool`, optional
If True, performs resampling in-place. If False, returns a resampled
copy of the current Variable. Default is False.
kind : {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}
Argument to pass to :obj:`scipy.interpolate.interp1d`; indicates
the kind of interpolation approach to use. See interp1d docs for
valid values. Default is 'linear'.
'''
if not inplace:
var = self.clone()
var.resample(sampling_rate, True, kind)
return var
if sampling_rate == self.sampling_rate:
return
old_sr = self.sampling_rate
n = len(self.index)
self.index = self._build_entity_index(self.run_info, sampling_rate)
x = np.arange(n)
num = len(self.index)
from scipy.interpolate import interp1d
f = interp1d(x, self.values.values.ravel(), kind=kind)
x_new = np.linspace(0, n - 1, num=num)
self.values = pd.DataFrame(f(x_new))
assert len(self.values) == len(self.index)
self.sampling_rate = sampling_rate | python | def resample(self, sampling_rate, inplace=False, kind='linear'):
'''Resample the Variable to the specified sampling rate.
Parameters
----------
sampling_rate : :obj:`int`, :obj:`float`
Target sampling rate (in Hz).
inplace : :obj:`bool`, optional
If True, performs resampling in-place. If False, returns a resampled
copy of the current Variable. Default is False.
kind : {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}
Argument to pass to :obj:`scipy.interpolate.interp1d`; indicates
the kind of interpolation approach to use. See interp1d docs for
valid values. Default is 'linear'.
'''
if not inplace:
var = self.clone()
var.resample(sampling_rate, True, kind)
return var
if sampling_rate == self.sampling_rate:
return
old_sr = self.sampling_rate
n = len(self.index)
self.index = self._build_entity_index(self.run_info, sampling_rate)
x = np.arange(n)
num = len(self.index)
from scipy.interpolate import interp1d
f = interp1d(x, self.values.values.ravel(), kind=kind)
x_new = np.linspace(0, n - 1, num=num)
self.values = pd.DataFrame(f(x_new))
assert len(self.values) == len(self.index)
self.sampling_rate = sampling_rate | [
"def",
"resample",
"(",
"self",
",",
"sampling_rate",
",",
"inplace",
"=",
"False",
",",
"kind",
"=",
"'linear'",
")",
":",
"if",
"not",
"inplace",
":",
"var",
"=",
"self",
".",
"clone",
"(",
")",
"var",
".",
"resample",
"(",
"sampling_rate",
",",
"True",
",",
"kind",
")",
"return",
"var",
"if",
"sampling_rate",
"==",
"self",
".",
"sampling_rate",
":",
"return",
"old_sr",
"=",
"self",
".",
"sampling_rate",
"n",
"=",
"len",
"(",
"self",
".",
"index",
")",
"self",
".",
"index",
"=",
"self",
".",
"_build_entity_index",
"(",
"self",
".",
"run_info",
",",
"sampling_rate",
")",
"x",
"=",
"np",
".",
"arange",
"(",
"n",
")",
"num",
"=",
"len",
"(",
"self",
".",
"index",
")",
"from",
"scipy",
".",
"interpolate",
"import",
"interp1d",
"f",
"=",
"interp1d",
"(",
"x",
",",
"self",
".",
"values",
".",
"values",
".",
"ravel",
"(",
")",
",",
"kind",
"=",
"kind",
")",
"x_new",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"n",
"-",
"1",
",",
"num",
"=",
"num",
")",
"self",
".",
"values",
"=",
"pd",
".",
"DataFrame",
"(",
"f",
"(",
"x_new",
")",
")",
"assert",
"len",
"(",
"self",
".",
"values",
")",
"==",
"len",
"(",
"self",
".",
"index",
")",
"self",
".",
"sampling_rate",
"=",
"sampling_rate"
] | Resample the Variable to the specified sampling rate.
Parameters
----------
sampling_rate : :obj:`int`, :obj:`float`
Target sampling rate (in Hz).
inplace : :obj:`bool`, optional
If True, performs resampling in-place. If False, returns a resampled
copy of the current Variable. Default is False.
kind : {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}
Argument to pass to :obj:`scipy.interpolate.interp1d`; indicates
the kind of interpolation approach to use. See interp1d docs for
valid values. Default is 'linear'. | [
"Resample",
"the",
"Variable",
"to",
"the",
"specified",
"sampling",
"rate",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/variables.py#L432-L469 | train |
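Resampling sketch, again using a hypothetical `dense_var`; with inplace=False the original is untouched and a resampled clone comes back:

    resampled = dense_var.resample(1, inplace=False, kind='linear')   # 1 Hz copy
    print(dense_var.sampling_rate, '->', resampled.sampling_rate)
    print(len(resampled.values) == len(resampled.index))              # True: lengths stay aligned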
bids-standard/pybids | bids/variables/variables.py | DenseRunVariable.to_df | def to_df(self, condition=True, entities=True, timing=True, sampling_rate=None):
'''Convert to a DataFrame, with columns for name and entities.
Parameters
----------
condition : :obj:`bool`
If True, adds a column for condition name, and names the amplitude
column 'amplitude'. If False, returns just onset, duration, and
amplitude, and gives the amplitude column the current column name.
entities : :obj:`bool`
If True, adds extra columns for all entities.
timing : :obj:`bool`
If True, includes onset and duration columns (even though events are
sampled uniformly). If False, omits them.
'''
if sampling_rate not in (None, self.sampling_rate):
return self.resample(sampling_rate).to_df(condition, entities)
df = super(DenseRunVariable, self).to_df(condition, entities)
if timing:
df['onset'] = self.timestamps.values.astype(float) / 1e+9
df['duration'] = 1. / self.sampling_rate
return df | python | def to_df(self, condition=True, entities=True, timing=True, sampling_rate=None):
'''Convert to a DataFrame, with columns for name and entities.
Parameters
----------
condition : :obj:`bool`
If True, adds a column for condition name, and names the amplitude
column 'amplitude'. If False, returns just onset, duration, and
amplitude, and gives the amplitude column the current column name.
entities : :obj:`bool`
If True, adds extra columns for all entities.
timing : :obj:`bool`
If True, includes onset and duration columns (even though events are
sampled uniformly). If False, omits them.
'''
if sampling_rate not in (None, self.sampling_rate):
return self.resample(sampling_rate).to_df(condition, entities)
df = super(DenseRunVariable, self).to_df(condition, entities)
if timing:
df['onset'] = self.timestamps.values.astype(float) / 1e+9
df['duration'] = 1. / self.sampling_rate
return df | [
"def",
"to_df",
"(",
"self",
",",
"condition",
"=",
"True",
",",
"entities",
"=",
"True",
",",
"timing",
"=",
"True",
",",
"sampling_rate",
"=",
"None",
")",
":",
"if",
"sampling_rate",
"not",
"in",
"(",
"None",
",",
"self",
".",
"sampling_rate",
")",
":",
"return",
"self",
".",
"resample",
"(",
"sampling_rate",
")",
".",
"to_df",
"(",
"condition",
",",
"entities",
")",
"df",
"=",
"super",
"(",
"DenseRunVariable",
",",
"self",
")",
".",
"to_df",
"(",
"condition",
",",
"entities",
")",
"if",
"timing",
":",
"df",
"[",
"'onset'",
"]",
"=",
"self",
".",
"timestamps",
".",
"values",
".",
"astype",
"(",
"float",
")",
"/",
"1e+9",
"df",
"[",
"'duration'",
"]",
"=",
"1.",
"/",
"self",
".",
"sampling_rate",
"return",
"df"
] | Convert to a DataFrame, with columns for name and entities.
Parameters
----------
condition : :obj:`bool`
If True, adds a column for condition name, and names the amplitude
column 'amplitude'. If False, returns just onset, duration, and
amplitude, and gives the amplitude column the current column name.
entities : :obj:`bool`
If True, adds extra columns for all entities.
timing : :obj:`bool`
If True, includes onset and duration columns (even though events are
sampled uniformly). If False, omits them. | [
"Convert",
"to",
"a",
"DataFrame",
"with",
"columns",
"for",
"name",
"and",
"entities",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/variables.py#L471-L495 | train |
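Exporting the same hypothetical dense variable; passing sampling_rate triggers an on-the-fly resample before the DataFrame is built:

    df = dense_var.to_df(condition=True, entities=True, timing=True, sampling_rate=1)
    print(df.columns.tolist())   # includes 'onset' and 'duration' because timing=True
    print(df.head())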
bids-standard/pybids | bids/variables/entities.py | NodeIndex.get_collections | def get_collections(self, unit, names=None, merge=False,
sampling_rate=None, **entities):
''' Retrieve variable data for a specified level in the Dataset.
Args:
unit (str): The unit of analysis to return variables for. Must be
one of 'run', 'session', 'subject', or 'dataset'.
names (list): Optional list of variables names to return. If
None, all available variables are returned.
merge (bool): If True, variables are merged across all observations
of the current unit. E.g., if unit='subject' and return_type=
'collection', variables from all subjects will be merged into a
single collection. If False, each observation is handled
separately, and the result is returned as a list.
sampling_rate (int, str): If unit='run', the sampling rate to
pass onto the returned BIDSRunVariableCollection.
entities: Optional constraints used to limit what gets returned.
Returns:
'''
nodes = self.get_nodes(unit, entities)
var_sets = []
for n in nodes:
var_set = list(n.variables.values())
var_set = [v for v in var_set if v.matches_entities(entities)]
if names is not None:
var_set = [v for v in var_set if v.name in names]
# Additional filtering on Variables past run level, because their
# contents are extracted from TSV files containing rows from
# multiple observations
if unit != 'run':
var_set = [v.filter(entities) for v in var_set]
var_sets.append(var_set)
if merge:
var_sets = [list(chain(*var_sets))]
results = []
for vs in var_sets:
if not vs:
continue
if unit == 'run':
vs = clc.BIDSRunVariableCollection(vs, sampling_rate)
else:
vs = clc.BIDSVariableCollection(vs)
results.append(vs)
if merge:
return results[0] if results else None
return results | python | def get_collections(self, unit, names=None, merge=False,
sampling_rate=None, **entities):
''' Retrieve variable data for a specified level in the Dataset.
Args:
unit (str): The unit of analysis to return variables for. Must be
one of 'run', 'session', 'subject', or 'dataset'.
names (list): Optional list of variables names to return. If
None, all available variables are returned.
merge (bool): If True, variables are merged across all observations
of the current unit. E.g., if unit='subject' and return_type=
'collection', variables from all subjects will be merged into a
single collection. If False, each observation is handled
separately, and the result is returned as a list.
sampling_rate (int, str): If unit='run', the sampling rate to
pass onto the returned BIDSRunVariableCollection.
entities: Optional constraints used to limit what gets returned.
Returns:
'''
nodes = self.get_nodes(unit, entities)
var_sets = []
for n in nodes:
var_set = list(n.variables.values())
var_set = [v for v in var_set if v.matches_entities(entities)]
if names is not None:
var_set = [v for v in var_set if v.name in names]
# Additional filtering on Variables past run level, because their
# contents are extracted from TSV files containing rows from
# multiple observations
if unit != 'run':
var_set = [v.filter(entities) for v in var_set]
var_sets.append(var_set)
if merge:
var_sets = [list(chain(*var_sets))]
results = []
for vs in var_sets:
if not vs:
continue
if unit == 'run':
vs = clc.BIDSRunVariableCollection(vs, sampling_rate)
else:
vs = clc.BIDSVariableCollection(vs)
results.append(vs)
if merge:
return results[0] if results else None
return results | [
"def",
"get_collections",
"(",
"self",
",",
"unit",
",",
"names",
"=",
"None",
",",
"merge",
"=",
"False",
",",
"sampling_rate",
"=",
"None",
",",
"*",
"*",
"entities",
")",
":",
"nodes",
"=",
"self",
".",
"get_nodes",
"(",
"unit",
",",
"entities",
")",
"var_sets",
"=",
"[",
"]",
"for",
"n",
"in",
"nodes",
":",
"var_set",
"=",
"list",
"(",
"n",
".",
"variables",
".",
"values",
"(",
")",
")",
"var_set",
"=",
"[",
"v",
"for",
"v",
"in",
"var_set",
"if",
"v",
".",
"matches_entities",
"(",
"entities",
")",
"]",
"if",
"names",
"is",
"not",
"None",
":",
"var_set",
"=",
"[",
"v",
"for",
"v",
"in",
"var_set",
"if",
"v",
".",
"name",
"in",
"names",
"]",
"# Additional filtering on Variables past run level, because their",
"# contents are extracted from TSV files containing rows from",
"# multiple observations",
"if",
"unit",
"!=",
"'run'",
":",
"var_set",
"=",
"[",
"v",
".",
"filter",
"(",
"entities",
")",
"for",
"v",
"in",
"var_set",
"]",
"var_sets",
".",
"append",
"(",
"var_set",
")",
"if",
"merge",
":",
"var_sets",
"=",
"[",
"list",
"(",
"chain",
"(",
"*",
"var_sets",
")",
")",
"]",
"results",
"=",
"[",
"]",
"for",
"vs",
"in",
"var_sets",
":",
"if",
"not",
"vs",
":",
"continue",
"if",
"unit",
"==",
"'run'",
":",
"vs",
"=",
"clc",
".",
"BIDSRunVariableCollection",
"(",
"vs",
",",
"sampling_rate",
")",
"else",
":",
"vs",
"=",
"clc",
".",
"BIDSVariableCollection",
"(",
"vs",
")",
"results",
".",
"append",
"(",
"vs",
")",
"if",
"merge",
":",
"return",
"results",
"[",
"0",
"]",
"if",
"results",
"else",
"None",
"return",
"results"
] | Retrieve variable data for a specified level in the Dataset.
Args:
unit (str): The unit of analysis to return variables for. Must be
one of 'run', 'session', 'subject', or 'dataset'.
names (list): Optional list of variables names to return. If
None, all available variables are returned.
merge (bool): If True, variables are merged across all observations
of the current unit. E.g., if unit='subject' and return_type=
'collection', variables from all subjects will be merged into a
single collection. If False, each observation is handled
separately, and the result is returned as a list.
sampling_rate (int, str): If unit='run', the sampling rate to
pass onto the returned BIDSRunVariableCollection.
entities: Optional constraints used to limit what gets returned.
Returns: | [
"Retrieve",
"variable",
"data",
"for",
"a",
"specified",
"level",
"in",
"the",
"Dataset",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/entities.py#L65-L118 | train |
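NodeIndex objects are normally produced by load_variables (as in the auto_model code earlier); this sketch assumes that import path, a hypothetical dataset root, and a task named 'motor':

    from bids.layout import BIDSLayout
    from bids.variables import load_variables   # import path assumed

    layout = BIDSLayout('/data/ds000117')        # hypothetical BIDS dataset root
    index = load_variables(layout, task='motor', levels=['run'])
    coll = index.get_collections('run', merge=True, subject='01')
    print(list(coll.variables))                  # names of run-level variables for subject 01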
bids-standard/pybids | bids/variables/entities.py | NodeIndex.get_or_create_node | def get_or_create_node(self, level, entities, *args, **kwargs):
''' Retrieves a child Node based on the specified criteria, creating a
new Node if necessary.
Args:
entities (dict): Dictionary of entities specifying which Node to
return.
args, kwargs: Optional positional or named arguments to pass onto
class-specific initializers. These arguments are only used if
a Node that matches the passed entities doesn't already exist,
and a new one must be created.
Returns:
A Node instance.
'''
result = self.get_nodes(level, entities)
if result:
if len(result) > 1:
raise ValueError("More than one matching Node found! If you're"
" expecting more than one Node, use "
"get_nodes() instead of get_or_create_node()."
)
return result[0]
# Create Node
if level == 'run':
node = RunNode(entities, *args, **kwargs)
else:
node = Node(level, entities)
entities = dict(entities, node_index=len(self.nodes), level=level)
self.nodes.append(node)
node_row = pd.Series(entities)
self.index = self.index.append(node_row, ignore_index=True)
return node | python | def get_or_create_node(self, level, entities, *args, **kwargs):
''' Retrieves a child Node based on the specified criteria, creating a
new Node if necessary.
Args:
entities (dict): Dictionary of entities specifying which Node to
return.
args, kwargs: Optional positional or named arguments to pass onto
class-specific initializers. These arguments are only used if
a Node that matches the passed entities doesn't already exist,
and a new one must be created.
Returns:
A Node instance.
'''
result = self.get_nodes(level, entities)
if result:
if len(result) > 1:
raise ValueError("More than one matching Node found! If you're"
" expecting more than one Node, use "
"get_nodes() instead of get_or_create_node()."
)
return result[0]
# Create Node
if level == 'run':
node = RunNode(entities, *args, **kwargs)
else:
node = Node(level, entities)
entities = dict(entities, node_index=len(self.nodes), level=level)
self.nodes.append(node)
node_row = pd.Series(entities)
self.index = self.index.append(node_row, ignore_index=True)
return node | [
"def",
"get_or_create_node",
"(",
"self",
",",
"level",
",",
"entities",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"self",
".",
"get_nodes",
"(",
"level",
",",
"entities",
")",
"if",
"result",
":",
"if",
"len",
"(",
"result",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"More than one matching Node found! If you're\"",
"\" expecting more than one Node, use \"",
"\"get_nodes() instead of get_or_create_node().\"",
")",
"return",
"result",
"[",
"0",
"]",
"# Create Node",
"if",
"level",
"==",
"'run'",
":",
"node",
"=",
"RunNode",
"(",
"entities",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"node",
"=",
"Node",
"(",
"level",
",",
"entities",
")",
"entities",
"=",
"dict",
"(",
"entities",
",",
"node_index",
"=",
"len",
"(",
"self",
".",
"nodes",
")",
",",
"level",
"=",
"level",
")",
"self",
".",
"nodes",
".",
"append",
"(",
"node",
")",
"node_row",
"=",
"pd",
".",
"Series",
"(",
"entities",
")",
"self",
".",
"index",
"=",
"self",
".",
"index",
".",
"append",
"(",
"node_row",
",",
"ignore_index",
"=",
"True",
")",
"return",
"node"
] | Retrieves a child Node based on the specified criteria, creating a
new Node if necessary.
Args:
entities (dict): Dictionary of entities specifying which Node to
return.
args, kwargs: Optional positional or named arguments to pass onto
class-specific initializers. These arguments are only used if
a Node that matches the passed entities doesn't already exist,
and a new one must be created.
Returns:
A Node instance. | [
"Retrieves",
"a",
"child",
"Node",
"based",
"on",
"the",
"specified",
"criteria",
"creating",
"a",
"new",
"Node",
"if",
"necessary",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/entities.py#L160-L198 | train |
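A sketch of the get-or-create behaviour on the index built above; the entity dict is deliberately minimal and hypothetical:

    node = index.get_or_create_node('subject', {'subject': '01'})
    same = index.get_or_create_node('subject', {'subject': '01'})
    print(node is same)   # True: the second call finds the node the first call returned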
bids-standard/pybids | bids/variables/kollekshuns.py | merge_collections | def merge_collections(collections, force_dense=False, sampling_rate='auto'):
''' Merge two or more collections at the same level of analysis.
Args:
collections (list): List of Collections to merge.
sampling_rate (int, str): Sampling rate to use if it becomes necessary
to resample DenseRunVariables. Either an integer or 'auto' (see
merge_variables docstring for further explanation).
Returns:
A BIDSVariableCollection or BIDSRunVariableCollection, depending
on the type of the input collections.
'''
if len(listify(collections)) == 1:
return collections
levels = set([c.level for c in collections])
if len(levels) > 1:
raise ValueError("At the moment, it's only possible to merge "
"Collections at the same level of analysis. You "
"passed collections at levels: %s." % levels)
variables = list(chain(*[c.variables.values() for c in collections]))
cls = collections[0].__class__
variables = cls.merge_variables(variables, sampling_rate=sampling_rate)
if isinstance(collections[0], BIDSRunVariableCollection):
if sampling_rate == 'auto':
rates = [var.sampling_rate for var in variables
if isinstance(var, DenseRunVariable)]
sampling_rate = rates[0] if rates else None
return cls(variables, sampling_rate)
return cls(variables) | python | def merge_collections(collections, force_dense=False, sampling_rate='auto'):
''' Merge two or more collections at the same level of analysis.
Args:
collections (list): List of Collections to merge.
sampling_rate (int, str): Sampling rate to use if it becomes necessary
to resample DenseRunVariables. Either an integer or 'auto' (see
merge_variables docstring for further explanation).
Returns:
A BIDSVariableCollection or BIDSRunVariableCollection, depending
on the type of the input collections.
'''
if len(listify(collections)) == 1:
return collections
levels = set([c.level for c in collections])
if len(levels) > 1:
raise ValueError("At the moment, it's only possible to merge "
"Collections at the same level of analysis. You "
"passed collections at levels: %s." % levels)
variables = list(chain(*[c.variables.values() for c in collections]))
cls = collections[0].__class__
variables = cls.merge_variables(variables, sampling_rate=sampling_rate)
if isinstance(collections[0], BIDSRunVariableCollection):
if sampling_rate == 'auto':
rates = [var.sampling_rate for var in variables
if isinstance(var, DenseRunVariable)]
sampling_rate = rates[0] if rates else None
return cls(variables, sampling_rate)
return cls(variables) | [
"def",
"merge_collections",
"(",
"collections",
",",
"force_dense",
"=",
"False",
",",
"sampling_rate",
"=",
"'auto'",
")",
":",
"if",
"len",
"(",
"listify",
"(",
"collections",
")",
")",
"==",
"1",
":",
"return",
"collections",
"levels",
"=",
"set",
"(",
"[",
"c",
".",
"level",
"for",
"c",
"in",
"collections",
"]",
")",
"if",
"len",
"(",
"levels",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"At the moment, it's only possible to merge \"",
"\"Collections at the same level of analysis. You \"",
"\"passed collections at levels: %s.\"",
"%",
"levels",
")",
"variables",
"=",
"list",
"(",
"chain",
"(",
"*",
"[",
"c",
".",
"variables",
".",
"values",
"(",
")",
"for",
"c",
"in",
"collections",
"]",
")",
")",
"cls",
"=",
"collections",
"[",
"0",
"]",
".",
"__class__",
"variables",
"=",
"cls",
".",
"merge_variables",
"(",
"variables",
",",
"sampling_rate",
"=",
"sampling_rate",
")",
"if",
"isinstance",
"(",
"collections",
"[",
"0",
"]",
",",
"BIDSRunVariableCollection",
")",
":",
"if",
"sampling_rate",
"==",
"'auto'",
":",
"rates",
"=",
"[",
"var",
".",
"sampling_rate",
"for",
"var",
"in",
"variables",
"if",
"isinstance",
"(",
"var",
",",
"DenseRunVariable",
")",
"]",
"sampling_rate",
"=",
"rates",
"[",
"0",
"]",
"if",
"rates",
"else",
"None",
"return",
"cls",
"(",
"variables",
",",
"sampling_rate",
")",
"return",
"cls",
"(",
"variables",
")"
] | Merge two or more collections at the same level of analysis.
Args:
collections (list): List of Collections to merge.
sampling_rate (int, str): Sampling rate to use if it becomes necessary
to resample DenseRunVariables. Either an integer or 'auto' (see
merge_variables docstring for further explanation).
Returns:
A BIDSVariableCollection or BIDSRunVariableCollection, depending
on the type of the input collections. | [
"Merge",
"two",
"or",
"more",
"collections",
"at",
"the",
"same",
"level",
"of",
"analysis",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/kollekshuns.py#L354-L390 | train |
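merge_collections only accepts same-level inputs; here two per-subject run collections from the hypothetical index above are merged (module path taken from the source file kollekshuns.py):

    from bids.variables.kollekshuns import merge_collections

    runs_01 = index.get_collections('run', merge=True, subject='01')
    runs_02 = index.get_collections('run', merge=True, subject='02')
    merged = merge_collections([runs_01, runs_02], sampling_rate='auto')
    print(type(merged).__name__)   # BIDSRunVariableCollection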
bids-standard/pybids | bids/variables/kollekshuns.py | BIDSVariableCollection.merge_variables | def merge_variables(variables, **kwargs):
''' Concatenates Variables along row axis.
Args:
variables (list): List of Variables to merge. Variables can have
different names (and all Variables that share a name will be
concatenated together).
Returns:
A list of Variables.
'''
var_dict = OrderedDict()
for v in variables:
if v.name not in var_dict:
var_dict[v.name] = []
var_dict[v.name].append(v)
return [merge_variables(vars_, **kwargs)
for vars_ in list(var_dict.values())] | python | def merge_variables(variables, **kwargs):
''' Concatenates Variables along row axis.
Args:
variables (list): List of Variables to merge. Variables can have
different names (and all Variables that share a name will be
concatenated together).
Returns:
A list of Variables.
'''
var_dict = OrderedDict()
for v in variables:
if v.name not in var_dict:
var_dict[v.name] = []
var_dict[v.name].append(v)
return [merge_variables(vars_, **kwargs)
for vars_ in list(var_dict.values())] | [
"def",
"merge_variables",
"(",
"variables",
",",
"*",
"*",
"kwargs",
")",
":",
"var_dict",
"=",
"OrderedDict",
"(",
")",
"for",
"v",
"in",
"variables",
":",
"if",
"v",
".",
"name",
"not",
"in",
"var_dict",
":",
"var_dict",
"[",
"v",
".",
"name",
"]",
"=",
"[",
"]",
"var_dict",
"[",
"v",
".",
"name",
"]",
".",
"append",
"(",
"v",
")",
"return",
"[",
"merge_variables",
"(",
"vars_",
",",
"*",
"*",
"kwargs",
")",
"for",
"vars_",
"in",
"list",
"(",
"var_dict",
".",
"values",
"(",
")",
")",
"]"
] | Concatenates Variables along row axis.
Args:
variables (list): List of Variables to merge. Variables can have
different names (and all Variables that share a name will be
concatenated together).
Returns:
A list of Variables. | [
"Concatenates",
"Variables",
"along",
"row",
"axis",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/kollekshuns.py#L69-L86 | train |
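merge_variables groups same-named variables before concatenating them; a sketch built on the two run collections from the previous example, calling the method through the class exactly as merge_collections does internally:

    all_vars = list(runs_01.variables.values()) + list(runs_02.variables.values())
    merged_vars = runs_01.__class__.merge_variables(all_vars)
    print([v.name for v in merged_vars])   # one merged Variable per unique name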
bids-standard/pybids | bids/variables/kollekshuns.py | BIDSVariableCollection.to_df | def to_df(self, variables=None, format='wide', fillna=np.nan, **kwargs):
''' Merge variables into a single pandas DataFrame.
Args:
variables (list): Optional list of column names to retain; if None,
all variables are returned.
format (str): Whether to return a DataFrame in 'wide' or 'long'
format. In 'wide' format, each row is defined by a unique
onset/duration, and each variable is in a separate column. In
'long' format, each row is a unique combination of onset,
duration, and variable name, and a single 'amplitude' column
provides the value.
fillna: Replace missing values with the specified value.
kwargs: Optional keyword arguments to pass onto each Variable's
to_df() call (e.g., condition, entities, and timing).
Returns: A pandas DataFrame.
'''
if variables is None:
variables = list(self.variables.keys())
# Can receive already-selected Variables from sub-classes
if not isinstance(variables[0], BIDSVariable):
variables = [v for v in self.variables.values()
if v.name in variables]
dfs = [v.to_df(**kwargs) for v in variables]
df = pd.concat(dfs, axis=0, sort=True)
if format == 'long':
return df.reset_index(drop=True).fillna(fillna)
ind_cols = list(set(df.columns) - {'condition', 'amplitude'})
df['amplitude'] = df['amplitude'].fillna('n/a')
df = df.pivot_table(index=ind_cols, columns='condition',
values='amplitude', aggfunc='first')
df = df.reset_index().replace('n/a', fillna)
df.columns.name = None
return df | python | def to_df(self, variables=None, format='wide', fillna=np.nan, **kwargs):
''' Merge variables into a single pandas DataFrame.
Args:
variables (list): Optional list of column names to retain; if None,
all variables are returned.
format (str): Whether to return a DataFrame in 'wide' or 'long'
format. In 'wide' format, each row is defined by a unique
onset/duration, and each variable is in a separate column. In
'long' format, each row is a unique combination of onset,
duration, and variable name, and a single 'amplitude' column
provides the value.
fillna: Replace missing values with the specified value.
kwargs: Optional keyword arguments to pass onto each Variable's
to_df() call (e.g., condition, entities, and timing).
Returns: A pandas DataFrame.
'''
if variables is None:
variables = list(self.variables.keys())
# Can receive already-selected Variables from sub-classes
if not isinstance(variables[0], BIDSVariable):
variables = [v for v in self.variables.values()
if v.name in variables]
dfs = [v.to_df(**kwargs) for v in variables]
df = pd.concat(dfs, axis=0, sort=True)
if format == 'long':
return df.reset_index(drop=True).fillna(fillna)
ind_cols = list(set(df.columns) - {'condition', 'amplitude'})
df['amplitude'] = df['amplitude'].fillna('n/a')
df = df.pivot_table(index=ind_cols, columns='condition',
values='amplitude', aggfunc='first')
df = df.reset_index().replace('n/a', fillna)
df.columns.name = None
return df | [
"def",
"to_df",
"(",
"self",
",",
"variables",
"=",
"None",
",",
"format",
"=",
"'wide'",
",",
"fillna",
"=",
"np",
".",
"nan",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"variables",
"is",
"None",
":",
"variables",
"=",
"list",
"(",
"self",
".",
"variables",
".",
"keys",
"(",
")",
")",
"# Can receive already-selected Variables from sub-classes",
"if",
"not",
"isinstance",
"(",
"variables",
"[",
"0",
"]",
",",
"BIDSVariable",
")",
":",
"variables",
"=",
"[",
"v",
"for",
"v",
"in",
"self",
".",
"variables",
".",
"values",
"(",
")",
"if",
"v",
".",
"name",
"in",
"variables",
"]",
"dfs",
"=",
"[",
"v",
".",
"to_df",
"(",
"*",
"*",
"kwargs",
")",
"for",
"v",
"in",
"variables",
"]",
"df",
"=",
"pd",
".",
"concat",
"(",
"dfs",
",",
"axis",
"=",
"0",
",",
"sort",
"=",
"True",
")",
"if",
"format",
"==",
"'long'",
":",
"return",
"df",
".",
"reset_index",
"(",
"drop",
"=",
"True",
")",
".",
"fillna",
"(",
"fillna",
")",
"ind_cols",
"=",
"list",
"(",
"set",
"(",
"df",
".",
"columns",
")",
"-",
"{",
"'condition'",
",",
"'amplitude'",
"}",
")",
"df",
"[",
"'amplitude'",
"]",
"=",
"df",
"[",
"'amplitude'",
"]",
".",
"fillna",
"(",
"'n/a'",
")",
"df",
"=",
"df",
".",
"pivot_table",
"(",
"index",
"=",
"ind_cols",
",",
"columns",
"=",
"'condition'",
",",
"values",
"=",
"'amplitude'",
",",
"aggfunc",
"=",
"'first'",
")",
"df",
"=",
"df",
".",
"reset_index",
"(",
")",
".",
"replace",
"(",
"'n/a'",
",",
"fillna",
")",
"df",
".",
"columns",
".",
"name",
"=",
"None",
"return",
"df"
] | Merge variables into a single pandas DataFrame.
Args:
variables (list): Optional list of column names to retain; if None,
all variables are returned.
format (str): Whether to return a DataFrame in 'wide' or 'long'
format. In 'wide' format, each row is defined by a unique
onset/duration, and each variable is in a separate column. In
'long' format, each row is a unique combination of onset,
duration, and variable name, and a single 'amplitude' column
provides the value.
fillna: Replace missing values with the specified value.
kwargs: Optional keyword arguments to pass onto each Variable's
to_df() call (e.g., condition, entities, and timing).
Returns: A pandas DataFrame. | [
"Merge",
"variables",
"into",
"a",
"single",
"pandas",
"DataFrame",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/kollekshuns.py#L88-L128 | train |
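Wide versus long export from a collection; the sketch assumes the hypothetical index above exposes subject-level variables (e.g. from participants.tsv):

    subj_coll = index.get_collections('subject', merge=True)
    wide = subj_coll.to_df(format='wide', fillna=0)   # one column per variable
    long = subj_coll.to_df(format='long')             # one row per observation per variable
    print(wide.shape, long.shape)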
bids-standard/pybids | bids/variables/kollekshuns.py | BIDSVariableCollection.from_df | def from_df(cls, data, entities=None, source='contrast'):
''' Create a Collection from a pandas DataFrame.
Args:
data (DataFrame): The DataFrame to convert to a Collection. Each
column will be converted to a SimpleVariable.
entities (DataFrame): An optional second DataFrame containing
entity information.
source (str): The value to set as the source for all Variables.
Returns:
A BIDSVariableCollection.
'''
variables = []
for col in data.columns:
_data = pd.DataFrame(data[col].values, columns=['amplitude'])
if entities is not None:
_data = pd.concat([_data, entities], axis=1, sort=True)
variables.append(SimpleVariable(name=col, data=_data, source=source))
return BIDSVariableCollection(variables) | python | def from_df(cls, data, entities=None, source='contrast'):
''' Create a Collection from a pandas DataFrame.
Args:
data (DataFrame): The DataFrame to convert to a Collection. Each
column will be converted to a SimpleVariable.
entities (DataFrame): An optional second DataFrame containing
entity information.
source (str): The value to set as the source for all Variables.
Returns:
A BIDSVariableCollection.
'''
variables = []
for col in data.columns:
_data = pd.DataFrame(data[col].values, columns=['amplitude'])
if entities is not None:
_data = pd.concat([_data, entities], axis=1, sort=True)
variables.append(SimpleVariable(name=col, data=_data, source=source))
return BIDSVariableCollection(variables) | [
"def",
"from_df",
"(",
"cls",
",",
"data",
",",
"entities",
"=",
"None",
",",
"source",
"=",
"'contrast'",
")",
":",
"variables",
"=",
"[",
"]",
"for",
"col",
"in",
"data",
".",
"columns",
":",
"_data",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
"[",
"col",
"]",
".",
"values",
",",
"columns",
"=",
"[",
"'amplitude'",
"]",
")",
"if",
"entities",
"is",
"not",
"None",
":",
"_data",
"=",
"pd",
".",
"concat",
"(",
"[",
"_data",
",",
"entities",
"]",
",",
"axis",
"=",
"1",
",",
"sort",
"=",
"True",
")",
"variables",
".",
"append",
"(",
"SimpleVariable",
"(",
"name",
"=",
"col",
",",
"data",
"=",
"_data",
",",
"source",
"=",
"source",
")",
")",
"return",
"BIDSVariableCollection",
"(",
"variables",
")"
] | Create a Collection from a pandas DataFrame.
Args:
data (DataFrame): The DataFrame to convert to a Collection. Each
column will be converted to a SimpleVariable.
entities (DataFrame): An optional second DataFrame containing
entity information.
source (str): The value to set as the source for all Variables.
Returns:
A BIDSVariableCollection. | [
"Create",
"a",
"Collection",
"from",
"a",
"pandas",
"DataFrame",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/kollekshuns.py#L131-L150 | train |
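from_df builds a collection directly from tabular data; the values below are made up:

    import pandas as pd
    from bids.variables.kollekshuns import BIDSVariableCollection

    data = pd.DataFrame({'age': [23, 31], 'handedness': [1, 0]})   # hypothetical measures
    entities = pd.DataFrame({'subject': ['01', '02']})
    coll2 = BIDSVariableCollection.from_df(data, entities=entities, source='contrast')
    print(list(coll2.variables))   # ['age', 'handedness']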
bids-standard/pybids | bids/variables/kollekshuns.py | BIDSVariableCollection.clone | def clone(self):
''' Returns a shallow copy of the current instance, except that all
variables are deep-cloned.
'''
clone = copy(self)
clone.variables = {k: v.clone() for (k, v) in self.variables.items()}
return clone | python | def clone(self):
''' Returns a shallow copy of the current instance, except that all
variables are deep-cloned.
'''
clone = copy(self)
clone.variables = {k: v.clone() for (k, v) in self.variables.items()}
return clone | [
"def",
"clone",
"(",
"self",
")",
":",
"clone",
"=",
"copy",
"(",
"self",
")",
"clone",
".",
"variables",
"=",
"{",
"k",
":",
"v",
".",
"clone",
"(",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"self",
".",
"variables",
".",
"items",
"(",
")",
"}",
"return",
"clone"
] | Returns a shallow copy of the current instance, except that all
variables are deep-cloned. | [
"Returns",
"a",
"shallow",
"copy",
"of",
"the",
"current",
"instance",
"except",
"that",
"all",
"variables",
"are",
"deep",
"-",
"cloned",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/kollekshuns.py#L152-L158 | train |
bids-standard/pybids | bids/variables/kollekshuns.py | BIDSVariableCollection._index_entities | def _index_entities(self):
''' Sets current instance's entities based on the existing index.
Note: Only entity key/value pairs common to all rows in all contained
Variables are returned. E.g., if a Collection contains Variables
extracted from runs 1, 2 and 3 from subject '01', the returned dict
will be {'subject': '01'}; the runs will be excluded as they vary
across the Collection contents.
'''
all_ents = pd.DataFrame.from_records(
[v.entities for v in self.variables.values()])
constant = all_ents.apply(lambda x: x.nunique() == 1)
if constant.empty:
self.entities = {}
else:
keep = all_ents.columns[constant]
ents = {k: all_ents[k].dropna().iloc[0] for k in keep}
self.entities = {k: v for k, v in ents.items() if pd.notnull(v)} | python | def _index_entities(self):
''' Sets current instance's entities based on the existing index.
Note: Only entity key/value pairs common to all rows in all contained
Variables are returned. E.g., if a Collection contains Variables
extracted from runs 1, 2 and 3 from subject '01', the returned dict
will be {'subject': '01'}; the runs will be excluded as they vary
across the Collection contents.
'''
all_ents = pd.DataFrame.from_records(
[v.entities for v in self.variables.values()])
constant = all_ents.apply(lambda x: x.nunique() == 1)
if constant.empty:
self.entities = {}
else:
keep = all_ents.columns[constant]
ents = {k: all_ents[k].dropna().iloc[0] for k in keep}
self.entities = {k: v for k, v in ents.items() if pd.notnull(v)} | [
"def",
"_index_entities",
"(",
"self",
")",
":",
"all_ents",
"=",
"pd",
".",
"DataFrame",
".",
"from_records",
"(",
"[",
"v",
".",
"entities",
"for",
"v",
"in",
"self",
".",
"variables",
".",
"values",
"(",
")",
"]",
")",
"constant",
"=",
"all_ents",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
".",
"nunique",
"(",
")",
"==",
"1",
")",
"if",
"constant",
".",
"empty",
":",
"self",
".",
"entities",
"=",
"{",
"}",
"else",
":",
"keep",
"=",
"all_ents",
".",
"columns",
"[",
"constant",
"]",
"ents",
"=",
"{",
"k",
":",
"all_ents",
"[",
"k",
"]",
".",
"dropna",
"(",
")",
".",
"iloc",
"[",
"0",
"]",
"for",
"k",
"in",
"keep",
"}",
"self",
".",
"entities",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"ents",
".",
"items",
"(",
")",
"if",
"pd",
".",
"notnull",
"(",
"v",
")",
"}"
] | Sets current instance's entities based on the existing index.
Note: Only entity key/value pairs common to all rows in all contained
Variables are returned. E.g., if a Collection contains Variables
extracted from runs 1, 2 and 3 from subject '01', the returned dict
will be {'subject': '01'}; the runs will be excluded as they vary
across the Collection contents. | [
"Sets",
"current",
"instance",
"s",
"entities",
"based",
"on",
"the",
"existing",
"index",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/kollekshuns.py#L164-L181 | train |
bids-standard/pybids | bids/variables/kollekshuns.py | BIDSVariableCollection.match_variables | def match_variables(self, pattern, return_type='name'):
''' Return columns whose names match the provided regex pattern.
Args:
pattern (str): A regex pattern to match all variable names against.
return_type (str): What to return. Must be one of:
'name': Returns a list of names of matching variables.
'variable': Returns a list of Variable objects whose names
match.
'''
pattern = re.compile(pattern)
vars_ = [v for v in self.variables.values() if pattern.search(v.name)]
return vars_ if return_type.startswith('var') \
else [v.name for v in vars_] | python | def match_variables(self, pattern, return_type='name'):
''' Return columns whose names match the provided regex pattern.
Args:
pattern (str): A regex pattern to match all variable names against.
return_type (str): What to return. Must be one of:
'name': Returns a list of names of matching variables.
'variable': Returns a list of Variable objects whose names
match.
'''
pattern = re.compile(pattern)
vars_ = [v for v in self.variables.values() if pattern.search(v.name)]
return vars_ if return_type.startswith('var') \
else [v.name for v in vars_] | [
"def",
"match_variables",
"(",
"self",
",",
"pattern",
",",
"return_type",
"=",
"'name'",
")",
":",
"pattern",
"=",
"re",
".",
"compile",
"(",
"pattern",
")",
"vars_",
"=",
"[",
"v",
"for",
"v",
"in",
"self",
".",
"variables",
".",
"values",
"(",
")",
"if",
"pattern",
".",
"search",
"(",
"v",
".",
"name",
")",
"]",
"return",
"vars_",
"if",
"return_type",
".",
"startswith",
"(",
"'var'",
")",
"else",
"[",
"v",
".",
"name",
"for",
"v",
"in",
"vars_",
"]"
] | Return columns whose names match the provided regex pattern.
Args:
pattern (str): A regex pattern to match all variable names against.
return_type (str): What to return. Must be one of:
'name': Returns a list of names of matching variables.
'variable': Returns a list of Variable objects whose names
match. | [
"Return",
"columns",
"whose",
"names",
"match",
"the",
"provided",
"regex",
"pattern",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/kollekshuns.py#L196-L209 | train |
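Regex lookup against a collection; the 'trial_type.*' naming below follows the Factor/Convolve convention used in auto_model earlier, but the exact variable names depend on the dataset:

    names = coll.match_variables(r'^trial_type\.')                          # list of matching names
    vars_ = coll.match_variables(r'^trial_type\.', return_type='variable')  # Variable objects
    print(names, [v.name for v in vars_])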
bids-standard/pybids | bids/variables/kollekshuns.py | BIDSRunVariableCollection.to_df | def to_df(self, variables=None, format='wide', sparse=True,
sampling_rate=None, include_sparse=True, include_dense=True,
**kwargs):
''' Merge columns into a single pandas DataFrame.
Args:
variables (list): Optional list of variable names to retain;
if None, all variables are written out.
format (str): Whether to return a DataFrame in 'wide' or 'long'
format. In 'wide' format, each row is defined by a unique
onset/duration, and each variable is in a separate column. In
'long' format, each row is a unique combination of onset,
duration, and variable name, and a single 'amplitude' column
provides the value.
sparse (bool): If True, variables will be kept in a sparse
format provided they are all internally represented as such.
If False, a dense matrix (i.e., uniform sampling rate for all
events) will be exported. Will be ignored if at least one
variable is dense.
sampling_rate (float): If a dense matrix is written out, the
sampling rate (in Hz) to use for downsampling. Defaults to the
value currently set in the instance.
kwargs: Optional keyword arguments to pass onto each Variable's
to_df() call (e.g., condition, entities, and timing).
include_sparse (bool): Whether or not to include sparse Variables.
include_dense (bool): Whether or not to include dense Variables.
Returns: A pandas DataFrame.
'''
if not include_sparse and not include_dense:
raise ValueError("You can't exclude both dense and sparse "
"variables! That leaves nothing!")
if variables is None:
variables = list(self.variables.keys())
if not include_sparse:
variables = [v for v in variables if
isinstance(self.variables[v], DenseRunVariable)]
if not include_dense:
variables = [v for v in variables if not
isinstance(self.variables[v], DenseRunVariable)]
if not variables:
return None
_vars = [self.variables[v] for v in variables]
if sparse and all(isinstance(v, SimpleVariable) for v in _vars):
variables = _vars
else:
sampling_rate = sampling_rate or self.sampling_rate
# Make sure all variables have the same sampling rate
variables = list(self.resample(sampling_rate, variables,
force_dense=True,
in_place=False).values())
return super(BIDSRunVariableCollection, self).to_df(variables, format,
**kwargs) | python | def to_df(self, variables=None, format='wide', sparse=True,
sampling_rate=None, include_sparse=True, include_dense=True,
**kwargs):
''' Merge columns into a single pandas DataFrame.
Args:
variables (list): Optional list of variable names to retain;
if None, all variables are written out.
format (str): Whether to return a DataFrame in 'wide' or 'long'
format. In 'wide' format, each row is defined by a unique
onset/duration, and each variable is in a separate column. In
'long' format, each row is a unique combination of onset,
duration, and variable name, and a single 'amplitude' column
provides the value.
sparse (bool): If True, variables will be kept in a sparse
format provided they are all internally represented as such.
If False, a dense matrix (i.e., uniform sampling rate for all
events) will be exported. Will be ignored if at least one
variable is dense.
sampling_rate (float): If a dense matrix is written out, the
sampling rate (in Hz) to use for downsampling. Defaults to the
value currently set in the instance.
kwargs: Optional keyword arguments to pass onto each Variable's
to_df() call (e.g., condition, entities, and timing).
include_sparse (bool): Whether or not to include sparse Variables.
include_dense (bool): Whether or not to include dense Variables.
Returns: A pandas DataFrame.
'''
if not include_sparse and not include_dense:
raise ValueError("You can't exclude both dense and sparse "
"variables! That leaves nothing!")
if variables is None:
variables = list(self.variables.keys())
if not include_sparse:
variables = [v for v in variables if
isinstance(self.variables[v], DenseRunVariable)]
if not include_dense:
variables = [v for v in variables if not
isinstance(self.variables[v], DenseRunVariable)]
if not variables:
return None
_vars = [self.variables[v] for v in variables]
if sparse and all(isinstance(v, SimpleVariable) for v in _vars):
variables = _vars
else:
sampling_rate = sampling_rate or self.sampling_rate
# Make sure all variables have the same sampling rate
variables = list(self.resample(sampling_rate, variables,
force_dense=True,
in_place=False).values())
return super(BIDSRunVariableCollection, self).to_df(variables, format,
**kwargs) | [
"def",
"to_df",
"(",
"self",
",",
"variables",
"=",
"None",
",",
"format",
"=",
"'wide'",
",",
"sparse",
"=",
"True",
",",
"sampling_rate",
"=",
"None",
",",
"include_sparse",
"=",
"True",
",",
"include_dense",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"include_sparse",
"and",
"not",
"include_dense",
":",
"raise",
"ValueError",
"(",
"\"You can't exclude both dense and sparse \"",
"\"variables! That leaves nothing!\"",
")",
"if",
"variables",
"is",
"None",
":",
"variables",
"=",
"list",
"(",
"self",
".",
"variables",
".",
"keys",
"(",
")",
")",
"if",
"not",
"include_sparse",
":",
"variables",
"=",
"[",
"v",
"for",
"v",
"in",
"variables",
"if",
"isinstance",
"(",
"self",
".",
"variables",
"[",
"v",
"]",
",",
"DenseRunVariable",
")",
"]",
"if",
"not",
"include_dense",
":",
"variables",
"=",
"[",
"v",
"for",
"v",
"in",
"variables",
"if",
"not",
"isinstance",
"(",
"self",
".",
"variables",
"[",
"v",
"]",
",",
"DenseRunVariable",
")",
"]",
"if",
"not",
"variables",
":",
"return",
"None",
"_vars",
"=",
"[",
"self",
".",
"variables",
"[",
"v",
"]",
"for",
"v",
"in",
"variables",
"]",
"if",
"sparse",
"and",
"all",
"(",
"isinstance",
"(",
"v",
",",
"SimpleVariable",
")",
"for",
"v",
"in",
"_vars",
")",
":",
"variables",
"=",
"_vars",
"else",
":",
"sampling_rate",
"=",
"sampling_rate",
"or",
"self",
".",
"sampling_rate",
"# Make sure all variables have the same sampling rate",
"variables",
"=",
"list",
"(",
"self",
".",
"resample",
"(",
"sampling_rate",
",",
"variables",
",",
"force_dense",
"=",
"True",
",",
"in_place",
"=",
"False",
")",
".",
"values",
"(",
")",
")",
"return",
"super",
"(",
"BIDSRunVariableCollection",
",",
"self",
")",
".",
"to_df",
"(",
"variables",
",",
"format",
",",
"*",
"*",
"kwargs",
")"
] | Merge columns into a single pandas DataFrame.
Args:
variables (list): Optional list of variable names to retain;
if None, all variables are written out.
format (str): Whether to return a DataFrame in 'wide' or 'long'
format. In 'wide' format, each row is defined by a unique
onset/duration, and each variable is in a separate column. In
'long' format, each row is a unique combination of onset,
duration, and variable name, and a single 'amplitude' column
provides the value.
sparse (bool): If True, variables will be kept in a sparse
format provided they are all internally represented as such.
If False, a dense matrix (i.e., uniform sampling rate for all
events) will be exported. Will be ignored if at least one
variable is dense.
sampling_rate (float): If a dense matrix is written out, the
sampling rate (in Hz) to use for downsampling. Defaults to the
value currently set in the instance.
kwargs: Optional keyword arguments to pass onto each Variable's
to_df() call (e.g., condition, entities, and timing).
include_sparse (bool): Whether or not to include sparse Variables.
include_dense (bool): Whether or not to include dense Variables.
Returns: A pandas DataFrame. | [
"Merge",
"columns",
"into",
"a",
"single",
"pandas",
"DataFrame",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/kollekshuns.py#L290-L351 | train |
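A hedged usage sketch for the to_df() method above; it assumes an existing BIDSRunVariableCollection named collection (for example, one built from a BIDS layout), and uses only the keyword arguments documented in this row:

long_df = collection.to_df(format='long')                 # onset/duration/condition/amplitude rows
wide_df = collection.to_df(format='wide', sparse=False,   # force a dense design matrix,
                           sampling_rate=10)              # resampled at 10 Hz before merging
dense_only = collection.to_df(include_sparse=False)       # returns None if no dense variables exist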
bids-standard/pybids | bids/analysis/transformations/munge.py | Rename._transform | def _transform(self, var):
''' Rename happens automatically in the base class, so all we need to
do is unset the original variable in the collection. '''
self.collection.variables.pop(var.name)
return var.values | python | def _transform(self, var):
''' Rename happens automatically in the base class, so all we need to
do is unset the original variable in the collection. '''
self.collection.variables.pop(var.name)
return var.values | [
"def",
"_transform",
"(",
"self",
",",
"var",
")",
":",
"self",
".",
"collection",
".",
"variables",
".",
"pop",
"(",
"var",
".",
"name",
")",
"return",
"var",
".",
"values"
] | Rename happens automatically in the base class, so all we need to
do is unset the original variable in the collection. | [
"Rename",
"happens",
"automatically",
"in",
"the",
"base",
"class",
"so",
"all",
"we",
"need",
"to",
"do",
"is",
"unset",
"the",
"original",
"variable",
"in",
"the",
"collection",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/transformations/munge.py#L215-L219 | train |
bids-standard/pybids | bids/layout/writing.py | replace_entities | def replace_entities(entities, pattern):
"""
Replaces all entity names in a given pattern with the corresponding
values provided by entities.
Args:
entities (dict): A dictionary mapping entity names to entity values.
pattern (str): A path pattern that contains entity names denoted
by curly braces. Optional portions denoted by square braces.
For example: 'sub-{subject}/[var-{name}/]{id}.csv'
Accepted entity values, using regex matching, denoted within angle
brackets.
For example: 'sub-{subject<01|02>}/{task}.csv'
Returns:
A new string with the entity values inserted where entity names
were denoted in the provided pattern.
"""
ents = re.findall(r'\{(.*?)\}', pattern)
new_path = pattern
for ent in ents:
match = re.search(r'([^|<]+)(<.*?>)?(\|.*)?', ent)
if match is None:
return None
name, valid, default = match.groups()
default = default[1:] if default is not None else default
if name in entities and valid is not None:
ent_val = str(entities[name])
if not re.match(valid[1:-1], ent_val):
if default is None:
return None
entities[name] = default
ent_val = entities.get(name, default)
if ent_val is None:
return None
new_path = new_path.replace('{%s}' % ent, str(ent_val))
return new_path | python | def replace_entities(entities, pattern):
"""
Replaces all entity names in a given pattern with the corresponding
values provided by entities.
Args:
entities (dict): A dictionary mapping entity names to entity values.
pattern (str): A path pattern that contains entity names denoted
by curly braces. Optional portions denoted by square braces.
For example: 'sub-{subject}/[var-{name}/]{id}.csv'
Accepted entity values, using regex matching, denoted within angle
brackets.
For example: 'sub-{subject<01|02>}/{task}.csv'
Returns:
A new string with the entity values inserted where entity names
were denoted in the provided pattern.
"""
ents = re.findall(r'\{(.*?)\}', pattern)
new_path = pattern
for ent in ents:
match = re.search(r'([^|<]+)(<.*?>)?(\|.*)?', ent)
if match is None:
return None
name, valid, default = match.groups()
default = default[1:] if default is not None else default
if name in entities and valid is not None:
ent_val = str(entities[name])
if not re.match(valid[1:-1], ent_val):
if default is None:
return None
entities[name] = default
ent_val = entities.get(name, default)
if ent_val is None:
return None
new_path = new_path.replace('{%s}' % ent, str(ent_val))
return new_path | [
"def",
"replace_entities",
"(",
"entities",
",",
"pattern",
")",
":",
"ents",
"=",
"re",
".",
"findall",
"(",
"r'\\{(.*?)\\}'",
",",
"pattern",
")",
"new_path",
"=",
"pattern",
"for",
"ent",
"in",
"ents",
":",
"match",
"=",
"re",
".",
"search",
"(",
"r'([^|<]+)(<.*?>)?(\\|.*)?'",
",",
"ent",
")",
"if",
"match",
"is",
"None",
":",
"return",
"None",
"name",
",",
"valid",
",",
"default",
"=",
"match",
".",
"groups",
"(",
")",
"default",
"=",
"default",
"[",
"1",
":",
"]",
"if",
"default",
"is",
"not",
"None",
"else",
"default",
"if",
"name",
"in",
"entities",
"and",
"valid",
"is",
"not",
"None",
":",
"ent_val",
"=",
"str",
"(",
"entities",
"[",
"name",
"]",
")",
"if",
"not",
"re",
".",
"match",
"(",
"valid",
"[",
"1",
":",
"-",
"1",
"]",
",",
"ent_val",
")",
":",
"if",
"default",
"is",
"None",
":",
"return",
"None",
"entities",
"[",
"name",
"]",
"=",
"default",
"ent_val",
"=",
"entities",
".",
"get",
"(",
"name",
",",
"default",
")",
"if",
"ent_val",
"is",
"None",
":",
"return",
"None",
"new_path",
"=",
"new_path",
".",
"replace",
"(",
"'{%s}'",
"%",
"ent",
",",
"str",
"(",
"ent_val",
")",
")",
"return",
"new_path"
] | Replaces all entity names in a given pattern with the corresponding
values provided by entities.
Args:
entities (dict): A dictionary mapping entity names to entity values.
pattern (str): A path pattern that contains entity names denoted
by curly braces. Optional portions denoted by square braces.
For example: 'sub-{subject}/[var-{name}/]{id}.csv'
Accepted entity values, using regex matching, denoted within angle
brackets.
For example: 'sub-{subject<01|02>}/{task}.csv'
Returns:
A new string with the entity values inserted where entity names
were denoted in the provided pattern. | [
"Replaces",
"all",
"entity",
"names",
"in",
"a",
"given",
"pattern",
"with",
"the",
"corresponding",
"values",
"provided",
"by",
"entities",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/writing.py#L16-L55 | train |
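A few worked calls for replace_entities, following the pattern syntax described in its docstring (the import path is the module recorded in this row):

from bids.layout.writing import replace_entities

replace_entities({'subject': '01', 'task': 'rest'}, 'sub-{subject<01|02>}/{task}.csv')
# -> 'sub-01/rest.csv'
replace_entities({'subject': '03'}, 'sub-{subject<01|02>|unknown}.csv')
# -> 'sub-unknown.csv' (the value fails the <01|02> check, so the |unknown default is used)
replace_entities({}, 'sub-{subject}.csv')
# -> None (no value and no default available for the entity)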
bids-standard/pybids | bids/layout/writing.py | write_contents_to_file | def write_contents_to_file(path, contents=None, link_to=None,
content_mode='text', root=None, conflicts='fail'):
"""
Uses provided filename patterns to write contents to a new path, given
a corresponding entity map.
Args:
path (str): Destination path of the desired contents.
contents (str): Raw text or binary encoded string of contents to write
to the new path.
link_to (str): Optional path with which to create a symbolic link to.
Used as an alternative to and takes priority over the contents
argument.
content_mode (str): Either 'text' or 'binary' to indicate the writing
mode for the new file. Only relevant if contents is provided.
root (str): Optional root directory that all patterns are relative
to. Defaults to current working directory.
conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
that defines the desired action when the output path already
exists. 'fail' raises an exception; 'skip' does nothing;
'overwrite' overwrites the existing file; 'append' adds a suffix
to each file copy, starting with 1. Default is 'fail'.
"""
if root is None and not isabs(path):
root = os.getcwd()
if root:
path = join(root, path)
if exists(path) or islink(path):
if conflicts == 'fail':
msg = 'A file at path {} already exists.'
raise ValueError(msg.format(path))
elif conflicts == 'skip':
msg = 'A file at path {} already exists, skipping writing file.'
logging.warn(msg.format(path))
return
elif conflicts == 'overwrite':
if isdir(path):
logging.warn('New path is a directory, not going to '
'overwrite it, skipping instead.')
return
os.remove(path)
elif conflicts == 'append':
i = 1
while i < sys.maxsize:
path_splits = splitext(path)
path_splits[0] = path_splits[0] + '_%d' % i
appended_filename = os.extsep.join(path_splits)
if not exists(appended_filename) and \
not islink(appended_filename):
path = appended_filename
break
i += 1
else:
raise ValueError('Did not provide a valid conflicts parameter')
if not exists(dirname(path)):
os.makedirs(dirname(path))
if link_to:
os.symlink(link_to, path)
elif contents:
mode = 'wb' if content_mode == 'binary' else 'w'
with open(path, mode) as f:
f.write(contents)
else:
raise ValueError('One of contents or link_to must be provided.') | python | def write_contents_to_file(path, contents=None, link_to=None,
content_mode='text', root=None, conflicts='fail'):
"""
Uses provided filename patterns to write contents to a new path, given
a corresponding entity map.
Args:
path (str): Destination path of the desired contents.
contents (str): Raw text or binary encoded string of contents to write
to the new path.
link_to (str): Optional path with which to create a symbolic link to.
Used as an alternative to and takes priority over the contents
argument.
content_mode (str): Either 'text' or 'binary' to indicate the writing
mode for the new file. Only relevant if contents is provided.
root (str): Optional root directory that all patterns are relative
to. Defaults to current working directory.
conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
that defines the desired action when the output path already
exists. 'fail' raises an exception; 'skip' does nothing;
'overwrite' overwrites the existing file; 'append' adds a suffix
to each file copy, starting with 1. Default is 'fail'.
"""
if root is None and not isabs(path):
root = os.getcwd()
if root:
path = join(root, path)
if exists(path) or islink(path):
if conflicts == 'fail':
msg = 'A file at path {} already exists.'
raise ValueError(msg.format(path))
elif conflicts == 'skip':
msg = 'A file at path {} already exists, skipping writing file.'
logging.warn(msg.format(path))
return
elif conflicts == 'overwrite':
if isdir(path):
logging.warn('New path is a directory, not going to '
'overwrite it, skipping instead.')
return
os.remove(path)
elif conflicts == 'append':
i = 1
while i < sys.maxsize:
path_splits = splitext(path)
path_splits[0] = path_splits[0] + '_%d' % i
appended_filename = os.extsep.join(path_splits)
if not exists(appended_filename) and \
not islink(appended_filename):
path = appended_filename
break
i += 1
else:
raise ValueError('Did not provide a valid conflicts parameter')
if not exists(dirname(path)):
os.makedirs(dirname(path))
if link_to:
os.symlink(link_to, path)
elif contents:
mode = 'wb' if content_mode == 'binary' else 'w'
with open(path, mode) as f:
f.write(contents)
else:
raise ValueError('One of contents or link_to must be provided.') | [
"def",
"write_contents_to_file",
"(",
"path",
",",
"contents",
"=",
"None",
",",
"link_to",
"=",
"None",
",",
"content_mode",
"=",
"'text'",
",",
"root",
"=",
"None",
",",
"conflicts",
"=",
"'fail'",
")",
":",
"if",
"root",
"is",
"None",
"and",
"not",
"isabs",
"(",
"path",
")",
":",
"root",
"=",
"os",
".",
"getcwd",
"(",
")",
"if",
"root",
":",
"path",
"=",
"join",
"(",
"root",
",",
"path",
")",
"if",
"exists",
"(",
"path",
")",
"or",
"islink",
"(",
"path",
")",
":",
"if",
"conflicts",
"==",
"'fail'",
":",
"msg",
"=",
"'A file at path {} already exists.'",
"raise",
"ValueError",
"(",
"msg",
".",
"format",
"(",
"path",
")",
")",
"elif",
"conflicts",
"==",
"'skip'",
":",
"msg",
"=",
"'A file at path {} already exists, skipping writing file.'",
"logging",
".",
"warn",
"(",
"msg",
".",
"format",
"(",
"path",
")",
")",
"return",
"elif",
"conflicts",
"==",
"'overwrite'",
":",
"if",
"isdir",
"(",
"path",
")",
":",
"logging",
".",
"warn",
"(",
"'New path is a directory, not going to '",
"'overwrite it, skipping instead.'",
")",
"return",
"os",
".",
"remove",
"(",
"path",
")",
"elif",
"conflicts",
"==",
"'append'",
":",
"i",
"=",
"1",
"while",
"i",
"<",
"sys",
".",
"maxsize",
":",
"path_splits",
"=",
"splitext",
"(",
"path",
")",
"path_splits",
"[",
"0",
"]",
"=",
"path_splits",
"[",
"0",
"]",
"+",
"'_%d'",
"%",
"i",
"appended_filename",
"=",
"os",
".",
"extsep",
".",
"join",
"(",
"path_splits",
")",
"if",
"not",
"exists",
"(",
"appended_filename",
")",
"and",
"not",
"islink",
"(",
"appended_filename",
")",
":",
"path",
"=",
"appended_filename",
"break",
"i",
"+=",
"1",
"else",
":",
"raise",
"ValueError",
"(",
"'Did not provide a valid conflicts parameter'",
")",
"if",
"not",
"exists",
"(",
"dirname",
"(",
"path",
")",
")",
":",
"os",
".",
"makedirs",
"(",
"dirname",
"(",
"path",
")",
")",
"if",
"link_to",
":",
"os",
".",
"symlink",
"(",
"link_to",
",",
"path",
")",
"elif",
"contents",
":",
"mode",
"=",
"'wb'",
"if",
"content_mode",
"==",
"'binary'",
"else",
"'w'",
"with",
"open",
"(",
"path",
",",
"mode",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"contents",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'One of contents or link_to must be provided.'",
")"
] | Uses provided filename patterns to write contents to a new path, given
a corresponding entity map.
Args:
path (str): Destination path of the desired contents.
contents (str): Raw text or binary encoded string of contents to write
to the new path.
link_to (str): Optional path with which to create a symbolic link to.
Used as an alternative to and takes priority over the contents
argument.
content_mode (str): Either 'text' or 'binary' to indicate the writing
mode for the new file. Only relevant if contents is provided.
root (str): Optional root directory that all patterns are relative
to. Defaults to current working directory.
conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
that defines the desired action when the output path already
exists. 'fail' raises an exception; 'skip' does nothing;
'overwrite' overwrites the existing file; 'append' adds a suffix
to each file copy, starting with 1. Default is 'fail'. | [
"Uses",
"provided",
"filename",
"patterns",
"to",
"write",
"contents",
"to",
"a",
"new",
"path",
"given",
"a",
"corresponding",
"entity",
"map",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/writing.py#L109-L177 | train |
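One caveat for anyone reusing the function above: in the conflicts='append' branch, splitext() returns a tuple, so the recorded path_splits[0] = ... assignment would raise a TypeError, and joining the pieces with os.extsep re-adds a dot that the extension already carries. A small corrected sketch of that branch only (an interpretation of the intent, not the recorded source):

import sys
from os.path import exists, islink, splitext

def _appended_path(path):
    root_part, ext = splitext(path)                    # ext keeps its leading dot
    for i in range(1, sys.maxsize):
        candidate = '%s_%d%s' % (root_part, i, ext)    # e.g. report.txt -> report_1.txt
        if not exists(candidate) and not islink(candidate):
            return candidate
    raise RuntimeError('No free filename found for %s' % path)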
bids-standard/pybids | bids/reports/report.py | BIDSReport.generate | def generate(self, **kwargs):
"""Generate the methods section.
Parameters
----------
task_converter : :obj:`dict`, optional
A dictionary with information for converting task names from BIDS
filename format to human-readable strings.
Returns
-------
counter : :obj:`collections.Counter`
A dictionary of unique descriptions across subjects in the dataset,
along with the number of times each pattern occurred. In cases
where all subjects underwent the same protocol, the most common
pattern is most likely the most complete. In cases where the
dataset contains multiple protocols, each pattern will need to be
inspected manually.
Examples
--------
>>> from os.path import join
>>> from bids.layout import BIDSLayout
>>> from bids.reports import BIDSReport
>>> from bids.tests import get_test_data_path
>>> layout = BIDSLayout(join(get_test_data_path(), 'synthetic'))
>>> report = BIDSReport(layout)
>>> counter = report.generate(session='01')
>>> counter.most_common()[0][0]
"""
descriptions = []
subjs = self.layout.get_subjects(**kwargs)
kwargs = {k: v for k, v in kwargs.items() if k != 'subject'}
for sid in subjs:
descriptions.append(self._report_subject(subject=sid, **kwargs))
counter = Counter(descriptions)
print('Number of patterns detected: {0}'.format(len(counter.keys())))
print(utils.reminder())
return counter | python | def generate(self, **kwargs):
"""Generate the methods section.
Parameters
----------
task_converter : :obj:`dict`, optional
A dictionary with information for converting task names from BIDS
filename format to human-readable strings.
Returns
-------
counter : :obj:`collections.Counter`
A dictionary of unique descriptions across subjects in the dataset,
along with the number of times each pattern occurred. In cases
where all subjects underwent the same protocol, the most common
pattern is most likely the most complete. In cases where the
dataset contains multiple protocols, each pattern will need to be
inspected manually.
Examples
--------
>>> from os.path import join
>>> from bids.layout import BIDSLayout
>>> from bids.reports import BIDSReport
>>> from bids.tests import get_test_data_path
>>> layout = BIDSLayout(join(get_test_data_path(), 'synthetic'))
>>> report = BIDSReport(layout)
>>> counter = report.generate(session='01')
>>> counter.most_common()[0][0]
"""
descriptions = []
subjs = self.layout.get_subjects(**kwargs)
kwargs = {k: v for k, v in kwargs.items() if k != 'subject'}
for sid in subjs:
descriptions.append(self._report_subject(subject=sid, **kwargs))
counter = Counter(descriptions)
print('Number of patterns detected: {0}'.format(len(counter.keys())))
print(utils.reminder())
return counter | [
"def",
"generate",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"descriptions",
"=",
"[",
"]",
"subjs",
"=",
"self",
".",
"layout",
".",
"get_subjects",
"(",
"*",
"*",
"kwargs",
")",
"kwargs",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
"if",
"k",
"!=",
"'subject'",
"}",
"for",
"sid",
"in",
"subjs",
":",
"descriptions",
".",
"append",
"(",
"self",
".",
"_report_subject",
"(",
"subject",
"=",
"sid",
",",
"*",
"*",
"kwargs",
")",
")",
"counter",
"=",
"Counter",
"(",
"descriptions",
")",
"print",
"(",
"'Number of patterns detected: {0}'",
".",
"format",
"(",
"len",
"(",
"counter",
".",
"keys",
"(",
")",
")",
")",
")",
"print",
"(",
"utils",
".",
"reminder",
"(",
")",
")",
"return",
"counter"
] | Generate the methods section.
Parameters
----------
task_converter : :obj:`dict`, optional
A dictionary with information for converting task names from BIDS
filename format to human-readable strings.
Returns
-------
counter : :obj:`collections.Counter`
A dictionary of unique descriptions across subjects in the dataset,
along with the number of times each pattern occurred. In cases
where all subjects underwent the same protocol, the most common
pattern is most likely the most complete. In cases where the
dataset contains multiple protocols, each pattern will need to be
inspected manually.
Examples
--------
>>> from os.path import join
>>> from bids.layout import BIDSLayout
>>> from bids.reports import BIDSReport
>>> from bids.tests import get_test_data_path
>>> layout = BIDSLayout(join(get_test_data_path(), 'synthetic'))
>>> report = BIDSReport(layout)
>>> counter = report.generate(session='01')
>>> counter.most_common()[0][0] | [
"Generate",
"the",
"methods",
"section",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/reports/report.py#L53-L92 | train |
bids-standard/pybids | bids/reports/report.py | BIDSReport._report_subject | def _report_subject(self, subject, **kwargs):
"""Write a report for a single subject.
Parameters
----------
subject : :obj:`str`
Subject ID.
Attributes
----------
layout : :obj:`bids.layout.BIDSLayout`
Layout object for a BIDS dataset.
config : :obj:`dict`
Configuration info for methods generation.
Returns
-------
description : :obj:`str`
A publication-ready report of the dataset's data acquisition
information. Each scan type is given its own paragraph.
"""
description_list = []
# Remove sess from kwargs if provided, else set sess as all available
sessions = kwargs.pop('session',
self.layout.get_sessions(subject=subject,
**kwargs))
if not sessions:
sessions = [None]
elif not isinstance(sessions, list):
sessions = [sessions]
for ses in sessions:
niftis = self.layout.get(subject=subject, extensions='nii.gz',
**kwargs)
if niftis:
description_list.append('For session {0}:'.format(ses))
description_list += parsing.parse_niftis(self.layout, niftis,
subject, self.config,
session=ses)
metadata = self.layout.get_metadata(niftis[0].path)
else:
raise Exception('No niftis for subject {0}'.format(subject))
# Assume all data were converted the same way and use the last nifti
# file's json for conversion information.
if 'metadata' not in vars():
raise Exception('No valid jsons found. Cannot generate final '
'paragraph.')
description = '\n\t'.join(description_list)
description = description.replace('\tFor session', '\nFor session')
description += '\n\n{0}'.format(parsing.final_paragraph(metadata))
return description | python | def _report_subject(self, subject, **kwargs):
"""Write a report for a single subject.
Parameters
----------
subject : :obj:`str`
Subject ID.
Attributes
----------
layout : :obj:`bids.layout.BIDSLayout`
Layout object for a BIDS dataset.
config : :obj:`dict`
Configuration info for methods generation.
Returns
-------
description : :obj:`str`
A publication-ready report of the dataset's data acquisition
information. Each scan type is given its own paragraph.
"""
description_list = []
# Remove sess from kwargs if provided, else set sess as all available
sessions = kwargs.pop('session',
self.layout.get_sessions(subject=subject,
**kwargs))
if not sessions:
sessions = [None]
elif not isinstance(sessions, list):
sessions = [sessions]
for ses in sessions:
niftis = self.layout.get(subject=subject, extensions='nii.gz',
**kwargs)
if niftis:
description_list.append('For session {0}:'.format(ses))
description_list += parsing.parse_niftis(self.layout, niftis,
subject, self.config,
session=ses)
metadata = self.layout.get_metadata(niftis[0].path)
else:
raise Exception('No niftis for subject {0}'.format(subject))
# Assume all data were converted the same way and use the last nifti
# file's json for conversion information.
if 'metadata' not in vars():
raise Exception('No valid jsons found. Cannot generate final '
'paragraph.')
description = '\n\t'.join(description_list)
description = description.replace('\tFor session', '\nFor session')
description += '\n\n{0}'.format(parsing.final_paragraph(metadata))
return description | [
"def",
"_report_subject",
"(",
"self",
",",
"subject",
",",
"*",
"*",
"kwargs",
")",
":",
"description_list",
"=",
"[",
"]",
"# Remove sess from kwargs if provided, else set sess as all available",
"sessions",
"=",
"kwargs",
".",
"pop",
"(",
"'session'",
",",
"self",
".",
"layout",
".",
"get_sessions",
"(",
"subject",
"=",
"subject",
",",
"*",
"*",
"kwargs",
")",
")",
"if",
"not",
"sessions",
":",
"sessions",
"=",
"[",
"None",
"]",
"elif",
"not",
"isinstance",
"(",
"sessions",
",",
"list",
")",
":",
"sessions",
"=",
"[",
"sessions",
"]",
"for",
"ses",
"in",
"sessions",
":",
"niftis",
"=",
"self",
".",
"layout",
".",
"get",
"(",
"subject",
"=",
"subject",
",",
"extensions",
"=",
"'nii.gz'",
",",
"*",
"*",
"kwargs",
")",
"if",
"niftis",
":",
"description_list",
".",
"append",
"(",
"'For session {0}:'",
".",
"format",
"(",
"ses",
")",
")",
"description_list",
"+=",
"parsing",
".",
"parse_niftis",
"(",
"self",
".",
"layout",
",",
"niftis",
",",
"subject",
",",
"self",
".",
"config",
",",
"session",
"=",
"ses",
")",
"metadata",
"=",
"self",
".",
"layout",
".",
"get_metadata",
"(",
"niftis",
"[",
"0",
"]",
".",
"path",
")",
"else",
":",
"raise",
"Exception",
"(",
"'No niftis for subject {0}'",
".",
"format",
"(",
"subject",
")",
")",
"# Assume all data were converted the same way and use the last nifti",
"# file's json for conversion information.",
"if",
"'metadata'",
"not",
"in",
"vars",
"(",
")",
":",
"raise",
"Exception",
"(",
"'No valid jsons found. Cannot generate final '",
"'paragraph.'",
")",
"description",
"=",
"'\\n\\t'",
".",
"join",
"(",
"description_list",
")",
"description",
"=",
"description",
".",
"replace",
"(",
"'\\tFor session'",
",",
"'\\nFor session'",
")",
"description",
"+=",
"'\\n\\n{0}'",
".",
"format",
"(",
"parsing",
".",
"final_paragraph",
"(",
"metadata",
")",
")",
"return",
"description"
] | Write a report for a single subject.
Parameters
----------
subject : :obj:`str`
Subject ID.
Attributes
----------
layout : :obj:`bids.layout.BIDSLayout`
Layout object for a BIDS dataset.
config : :obj:`dict`
Configuration info for methods generation.
Returns
-------
description : :obj:`str`
A publication-ready report of the dataset's data acquisition
information. Each scan type is given its own paragraph. | [
"Write",
"a",
"report",
"for",
"a",
"single",
"subject",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/reports/report.py#L94-L147 | train |
bids-standard/pybids | bids/analysis/hrf.py | _gamma_difference_hrf | def _gamma_difference_hrf(tr, oversampling=50, time_length=32., onset=0.,
delay=6, undershoot=16., dispersion=1.,
u_dispersion=1., ratio=0.167):
""" Compute an hrf as the difference of two gamma functions
Parameters
----------
tr : float
scan repeat time, in seconds
oversampling : int, optional (default=50)
temporal oversampling factor
time_length : float, optional (default=32)
hrf kernel length, in seconds
onset: float
onset time of the hrf
delay: float, optional
delay parameter of the hrf (in s.)
undershoot: float, optional
undershoot parameter of the hrf (in s.)
dispersion : float, optional
dispersion parameter for the first gamma function
u_dispersion : float, optional
dispersion parameter for the second gamma function
ratio : float, optional
ratio of the two gamma components
Returns
-------
hrf : array of shape(length / tr * oversampling, dtype=float)
hrf sampling on the oversampled time grid
"""
from scipy.stats import gamma
dt = tr / oversampling
time_stamps = np.linspace(0, time_length, np.rint(float(time_length) / dt).astype(np.int))
time_stamps -= onset
hrf = gamma.pdf(time_stamps, delay / dispersion, dt / dispersion) -\
ratio * gamma.pdf(
time_stamps, undershoot / u_dispersion, dt / u_dispersion)
hrf /= hrf.sum()
return hrf | python | def _gamma_difference_hrf(tr, oversampling=50, time_length=32., onset=0.,
delay=6, undershoot=16., dispersion=1.,
u_dispersion=1., ratio=0.167):
""" Compute an hrf as the difference of two gamma functions
Parameters
----------
tr : float
scan repeat time, in seconds
oversampling : int, optional (default=50)
temporal oversampling factor
time_length : float, optional (default=32)
hrf kernel length, in seconds
onset: float
onset time of the hrf
delay: float, optional
delay parameter of the hrf (in s.)
undershoot: float, optional
undershoot parameter of the hrf (in s.)
dispersion : float, optional
dispersion parameter for the first gamma function
u_dispersion : float, optional
dispersion parameter for the second gamma function
ratio : float, optional
ratio of the two gamma components
Returns
-------
hrf : array of shape(length / tr * oversampling, dtype=float)
hrf sampling on the oversampled time grid
"""
from scipy.stats import gamma
dt = tr / oversampling
time_stamps = np.linspace(0, time_length, np.rint(float(time_length) / dt).astype(np.int))
time_stamps -= onset
hrf = gamma.pdf(time_stamps, delay / dispersion, dt / dispersion) -\
ratio * gamma.pdf(
time_stamps, undershoot / u_dispersion, dt / u_dispersion)
hrf /= hrf.sum()
return hrf | [
"def",
"_gamma_difference_hrf",
"(",
"tr",
",",
"oversampling",
"=",
"50",
",",
"time_length",
"=",
"32.",
",",
"onset",
"=",
"0.",
",",
"delay",
"=",
"6",
",",
"undershoot",
"=",
"16.",
",",
"dispersion",
"=",
"1.",
",",
"u_dispersion",
"=",
"1.",
",",
"ratio",
"=",
"0.167",
")",
":",
"from",
"scipy",
".",
"stats",
"import",
"gamma",
"dt",
"=",
"tr",
"/",
"oversampling",
"time_stamps",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"time_length",
",",
"np",
".",
"rint",
"(",
"float",
"(",
"time_length",
")",
"/",
"dt",
")",
".",
"astype",
"(",
"np",
".",
"int",
")",
")",
"time_stamps",
"-=",
"onset",
"hrf",
"=",
"gamma",
".",
"pdf",
"(",
"time_stamps",
",",
"delay",
"/",
"dispersion",
",",
"dt",
"/",
"dispersion",
")",
"-",
"ratio",
"*",
"gamma",
".",
"pdf",
"(",
"time_stamps",
",",
"undershoot",
"/",
"u_dispersion",
",",
"dt",
"/",
"u_dispersion",
")",
"hrf",
"/=",
"hrf",
".",
"sum",
"(",
")",
"return",
"hrf"
] | Compute an hrf as the difference of two gamma functions
Parameters
----------
tr : float
scan repeat time, in seconds
oversampling : int, optional (default=50)
temporal oversampling factor
time_length : float, optional (default=32)
hrf kernel length, in seconds
onset: float
onset time of the hrf
delay: float, optional
delay parameter of the hrf (in s.)
undershoot: float, optional
undershoot parameter of the hrf (in s.)
dispersion : float, optional
dispersion parameter for the first gamma function
u_dispersion : float, optional
dispersion parameter for the second gamma function
ratio : float, optional
ratio of the two gamma components
Returns
-------
hrf : array of shape(length / tr * oversampling, dtype=float)
hrf sampling on the oversampled time grid | [
"Compute",
"an",
"hrf",
"as",
"the",
"difference",
"of",
"two",
"gamma",
"functions"
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/hrf.py#L13-L61 | train |
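A quick sanity-check sketch for the kernels defined in this module (assumes bids.analysis.hrf is importable as recorded; spm_hrf and glover_hrf are the thin wrappers listed in the following rows):

import numpy as np
from bids.analysis.hrf import spm_hrf, glover_hrf

tr = 2.0
hrf = glover_hrf(tr, oversampling=50, time_length=32.)
print(hrf.shape)                    # 32 s kernel sampled every tr/oversampling = 0.04 s -> (800,)
print(np.isclose(hrf.sum(), 1.0))   # kernels are normalized to unit sum
print(spm_hrf(tr).shape)            # the SPM variant uses the same time grid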
bids-standard/pybids | bids/analysis/hrf.py | spm_hrf | def spm_hrf(tr, oversampling=50, time_length=32., onset=0.):
""" Implementation of the SPM hrf model
Parameters
----------
tr : float
scan repeat time, in seconds
oversampling : int, optional
temporal oversampling factor
time_length : float, optional
hrf kernel length, in seconds
onset : float, optional
hrf onset time, in seconds
Returns
-------
hrf: array of shape(length / tr * oversampling, dtype=float)
hrf sampling on the oversampled time grid
"""
return _gamma_difference_hrf(tr, oversampling, time_length, onset) | python | def spm_hrf(tr, oversampling=50, time_length=32., onset=0.):
""" Implementation of the SPM hrf model
Parameters
----------
tr : float
scan repeat time, in seconds
oversampling : int, optional
temporal oversampling factor
time_length : float, optional
hrf kernel length, in seconds
onset : float, optional
hrf onset time, in seconds
Returns
-------
hrf: array of shape(length / tr * oversampling, dtype=float)
hrf sampling on the oversampled time grid
"""
return _gamma_difference_hrf(tr, oversampling, time_length, onset) | [
"def",
"spm_hrf",
"(",
"tr",
",",
"oversampling",
"=",
"50",
",",
"time_length",
"=",
"32.",
",",
"onset",
"=",
"0.",
")",
":",
"return",
"_gamma_difference_hrf",
"(",
"tr",
",",
"oversampling",
",",
"time_length",
",",
"onset",
")"
] | Implementation of the SPM hrf model
Parameters
----------
tr : float
scan repeat time, in seconds
oversampling : int, optional
temporal oversampling factor
time_length : float, optional
hrf kernel length, in seconds
onset : float, optional
hrf onset time, in seconds
Returns
-------
hrf: array of shape(length / tr * oversampling, dtype=float)
hrf sampling on the oversampled time grid | [
"Implementation",
"of",
"the",
"SPM",
"hrf",
"model"
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/hrf.py#L64-L86 | train |
bids-standard/pybids | bids/analysis/hrf.py | glover_hrf | def glover_hrf(tr, oversampling=50, time_length=32., onset=0.):
""" Implementation of the Glover hrf model
Parameters
----------
tr : float
scan repeat time, in seconds
oversampling : int, optional
temporal oversampling factor
time_length : float, optional
hrf kernel length, in seconds
onset : float, optional
onset of the response
Returns
-------
hrf: array of shape(length / tr * oversampling, dtype=float)
hrf sampling on the oversampled time grid
"""
return _gamma_difference_hrf(tr, oversampling, time_length, onset,
delay=6, undershoot=12., dispersion=.9,
u_dispersion=.9, ratio=.35) | python | def glover_hrf(tr, oversampling=50, time_length=32., onset=0.):
""" Implementation of the Glover hrf model
Parameters
----------
tr : float
scan repeat time, in seconds
oversampling : int, optional
temporal oversampling factor
time_length : float, optional
hrf kernel length, in seconds
onset : float, optional
onset of the response
Returns
-------
hrf: array of shape(length / tr * oversampling, dtype=float)
hrf sampling on the oversampled time grid
"""
return _gamma_difference_hrf(tr, oversampling, time_length, onset,
delay=6, undershoot=12., dispersion=.9,
u_dispersion=.9, ratio=.35) | [
"def",
"glover_hrf",
"(",
"tr",
",",
"oversampling",
"=",
"50",
",",
"time_length",
"=",
"32.",
",",
"onset",
"=",
"0.",
")",
":",
"return",
"_gamma_difference_hrf",
"(",
"tr",
",",
"oversampling",
",",
"time_length",
",",
"onset",
",",
"delay",
"=",
"6",
",",
"undershoot",
"=",
"12.",
",",
"dispersion",
"=",
".9",
",",
"u_dispersion",
"=",
".9",
",",
"ratio",
"=",
".35",
")"
] | Implementation of the Glover hrf model
Parameters
----------
tr : float
scan repeat time, in seconds
oversampling : int, optional
temporal oversampling factor
time_length : float, optional
hrf kernel length, in seconds
onset : float, optional
onset of the response
Returns
-------
hrf: array of shape(length / tr * oversampling, dtype=float)
hrf sampling on the oversampled time grid | [
"Implementation",
"of",
"the",
"Glover",
"hrf",
"model"
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/hrf.py#L89-L113 | train |
bids-standard/pybids | bids/analysis/hrf.py | spm_dispersion_derivative | def spm_dispersion_derivative(tr, oversampling=50, time_length=32., onset=0.):
"""Implementation of the SPM dispersion derivative hrf model
Parameters
----------
tr: float
scan repeat time, in seconds
oversampling: int, optional
temporal oversampling factor in seconds
time_length: float, optional
hrf kernel length, in seconds
onset : float, optional
onset of the response in seconds
Returns
-------
dhrf: array of shape(length / tr * oversampling), dtype=float
dhrf sampling on the oversampled time grid
"""
dd = .01
dhrf = 1. / dd * (
- _gamma_difference_hrf(tr, oversampling, time_length,
onset, dispersion=1. + dd)
+ _gamma_difference_hrf(tr, oversampling, time_length, onset))
return dhrf | python | def spm_dispersion_derivative(tr, oversampling=50, time_length=32., onset=0.):
"""Implementation of the SPM dispersion derivative hrf model
Parameters
----------
tr: float
scan repeat time, in seconds
oversampling: int, optional
temporal oversampling factor in seconds
time_length: float, optional
hrf kernel length, in seconds
onset : float, optional
onset of the response in seconds
Returns
-------
dhrf: array of shape(length / tr * oversampling), dtype=float
dhrf sampling on the oversampled time grid
"""
dd = .01
dhrf = 1. / dd * (
- _gamma_difference_hrf(tr, oversampling, time_length,
onset, dispersion=1. + dd)
+ _gamma_difference_hrf(tr, oversampling, time_length, onset))
return dhrf | [
"def",
"spm_dispersion_derivative",
"(",
"tr",
",",
"oversampling",
"=",
"50",
",",
"time_length",
"=",
"32.",
",",
"onset",
"=",
"0.",
")",
":",
"dd",
"=",
".01",
"dhrf",
"=",
"1.",
"/",
"dd",
"*",
"(",
"-",
"_gamma_difference_hrf",
"(",
"tr",
",",
"oversampling",
",",
"time_length",
",",
"onset",
",",
"dispersion",
"=",
"1.",
"+",
"dd",
")",
"+",
"_gamma_difference_hrf",
"(",
"tr",
",",
"oversampling",
",",
"time_length",
",",
"onset",
")",
")",
"return",
"dhrf"
] | Implementation of the SPM dispersion derivative hrf model
Parameters
----------
tr: float
scan repeat time, in seconds
oversampling: int, optional
temporal oversampling factor in seconds
time_length: float, optional
hrf kernel length, in seconds
onset : float, optional
onset of the response in seconds
Returns
-------
dhrf: array of shape(length / tr * oversampling), dtype=float
dhrf sampling on the oversampled time grid | [
"Implementation",
"of",
"the",
"SPM",
"dispersion",
"derivative",
"hrf",
"model"
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/hrf.py#L169-L196 | train |
bids-standard/pybids | bids/analysis/hrf.py | glover_dispersion_derivative | def glover_dispersion_derivative(tr, oversampling=50, time_length=32.,
onset=0.):
"""Implementation of the Glover dispersion derivative hrf model
Parameters
----------
tr: float
scan repeat time, in seconds
oversampling: int, optional
temporal oversampling factor in seconds
time_length: float, optional
hrf kernel length, in seconds
onset : float, optional
onset of the response in seconds
Returns
-------
dhrf: array of shape(length / tr * oversampling), dtype=float
dhrf sampling on the oversampled time grid
"""
dd = .01
dhrf = 1. / dd * (
- _gamma_difference_hrf(
tr, oversampling, time_length, onset,
delay=6, undershoot=12., dispersion=.9 + dd, ratio=.35)
+ _gamma_difference_hrf(
tr, oversampling, time_length, onset, delay=6, undershoot=12.,
dispersion=.9, ratio=.35))
return dhrf | python | def glover_dispersion_derivative(tr, oversampling=50, time_length=32.,
onset=0.):
"""Implementation of the Glover dispersion derivative hrf model
Parameters
----------
tr: float
scan repeat time, in seconds
oversampling: int, optional
temporal oversampling factor in seconds
time_length: float, optional
hrf kernel length, in seconds
onset : float, optional
onset of the response in seconds
Returns
-------
dhrf: array of shape(length / tr * oversampling), dtype=float
dhrf sampling on the oversampled time grid
"""
dd = .01
dhrf = 1. / dd * (
- _gamma_difference_hrf(
tr, oversampling, time_length, onset,
delay=6, undershoot=12., dispersion=.9 + dd, ratio=.35)
+ _gamma_difference_hrf(
tr, oversampling, time_length, onset, delay=6, undershoot=12.,
dispersion=.9, ratio=.35))
return dhrf | [
"def",
"glover_dispersion_derivative",
"(",
"tr",
",",
"oversampling",
"=",
"50",
",",
"time_length",
"=",
"32.",
",",
"onset",
"=",
"0.",
")",
":",
"dd",
"=",
".01",
"dhrf",
"=",
"1.",
"/",
"dd",
"*",
"(",
"-",
"_gamma_difference_hrf",
"(",
"tr",
",",
"oversampling",
",",
"time_length",
",",
"onset",
",",
"delay",
"=",
"6",
",",
"undershoot",
"=",
"12.",
",",
"dispersion",
"=",
".9",
"+",
"dd",
",",
"ratio",
"=",
".35",
")",
"+",
"_gamma_difference_hrf",
"(",
"tr",
",",
"oversampling",
",",
"time_length",
",",
"onset",
",",
"delay",
"=",
"6",
",",
"undershoot",
"=",
"12.",
",",
"dispersion",
"=",
".9",
",",
"ratio",
"=",
".35",
")",
")",
"return",
"dhrf"
] | Implementation of the Glover dispersion derivative hrf model
Parameters
----------
tr: float
scan repeat time, in seconds
oversampling: int, optional
temporal oversampling factor in seconds
time_length: float, optional
hrf kernel length, in seconds
onset : float, optional
onset of the response in seconds
Returns
-------
dhrf: array of shape(length / tr * oversampling), dtype=float
dhrf sampling on the oversampled time grid | [
"Implementation",
"of",
"the",
"Glover",
"dispersion",
"derivative",
"hrf",
"model"
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/hrf.py#L199-L230 | train |
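Both dispersion-derivative rows use the same construction: a one-sided finite difference [h(sigma) - h(sigma + delta)] / delta with step delta = 0.01, evaluated at sigma = 1 for the SPM kernel and at sigma = 0.9 (with ratio 0.35) for the Glover kernel.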
bids-standard/pybids | bids/analysis/hrf.py | _sample_condition | def _sample_condition(exp_condition, frame_times, oversampling=50,
min_onset=-24):
"""Make a possibly oversampled event regressor from condition information.
Parameters
----------
exp_condition : arraylike of shape (3, n_events)
yields description of events for this condition as a
(onsets, durations, amplitudes) triplet
frame_times : array of shape(n_scans)
sample time points
over_sampling : int, optional
factor for oversampling event regressor
min_onset : float, optional
minimal onset relative to frame_times[0] (in seconds)
events that start before frame_times[0] + min_onset are not considered
Returns
-------
regressor: array of shape(over_sampling * n_scans)
possibly oversampled event regressor
hr_frame_times : array of shape(over_sampling * n_scans)
time points used for regressor sampling
"""
# Find the high-resolution frame_times
n = frame_times.size
min_onset = float(min_onset)
n_hr = ((n - 1) * 1. / (frame_times.max() - frame_times.min()) *
(frame_times.max() * (1 + 1. / (n - 1)) - frame_times.min() -
min_onset) * oversampling) + 1
hr_frame_times = np.linspace(frame_times.min() + min_onset,
frame_times.max() * (1 + 1. / (n - 1)),
np.rint(n_hr).astype(np.int))
# Get the condition information
onsets, durations, values = tuple(map(np.asanyarray, exp_condition))
if (onsets < frame_times[0] + min_onset).any():
warnings.warn(('Some stimulus onsets are earlier than %s in the'
' experiment and are thus not considered in the model'
% (frame_times[0] + min_onset)), UserWarning)
# Set up the regressor timecourse
tmax = len(hr_frame_times)
regressor = np.zeros_like(hr_frame_times).astype(np.float)
t_onset = np.minimum(np.searchsorted(hr_frame_times, onsets), tmax - 1)
regressor[t_onset] += values
t_offset = np.minimum(
np.searchsorted(hr_frame_times, onsets + durations),
tmax - 1)
# Handle the case where duration is 0 by offsetting at t + 1
for i, t in enumerate(t_offset):
if t < (tmax - 1) and t == t_onset[i]:
t_offset[i] += 1
regressor[t_offset] -= values
regressor = np.cumsum(regressor)
return regressor, hr_frame_times | python | def _sample_condition(exp_condition, frame_times, oversampling=50,
min_onset=-24):
"""Make a possibly oversampled event regressor from condition information.
Parameters
----------
exp_condition : arraylike of shape (3, n_events)
yields description of events for this condition as a
(onsets, durations, amplitudes) triplet
frame_times : array of shape(n_scans)
sample time points
over_sampling : int, optional
factor for oversampling event regressor
min_onset : float, optional
minimal onset relative to frame_times[0] (in seconds)
events that start before frame_times[0] + min_onset are not considered
Returns
-------
regressor: array of shape(over_sampling * n_scans)
possibly oversampled event regressor
hr_frame_times : array of shape(over_sampling * n_scans)
time points used for regressor sampling
"""
# Find the high-resolution frame_times
n = frame_times.size
min_onset = float(min_onset)
n_hr = ((n - 1) * 1. / (frame_times.max() - frame_times.min()) *
(frame_times.max() * (1 + 1. / (n - 1)) - frame_times.min() -
min_onset) * oversampling) + 1
hr_frame_times = np.linspace(frame_times.min() + min_onset,
frame_times.max() * (1 + 1. / (n - 1)),
np.rint(n_hr).astype(np.int))
# Get the condition information
onsets, durations, values = tuple(map(np.asanyarray, exp_condition))
if (onsets < frame_times[0] + min_onset).any():
warnings.warn(('Some stimulus onsets are earlier than %s in the'
' experiment and are thus not considered in the model'
% (frame_times[0] + min_onset)), UserWarning)
# Set up the regressor timecourse
tmax = len(hr_frame_times)
regressor = np.zeros_like(hr_frame_times).astype(np.float)
t_onset = np.minimum(np.searchsorted(hr_frame_times, onsets), tmax - 1)
regressor[t_onset] += values
t_offset = np.minimum(
np.searchsorted(hr_frame_times, onsets + durations),
tmax - 1)
# Handle the case where duration is 0 by offsetting at t + 1
for i, t in enumerate(t_offset):
if t < (tmax - 1) and t == t_onset[i]:
t_offset[i] += 1
regressor[t_offset] -= values
regressor = np.cumsum(regressor)
return regressor, hr_frame_times | [
"def",
"_sample_condition",
"(",
"exp_condition",
",",
"frame_times",
",",
"oversampling",
"=",
"50",
",",
"min_onset",
"=",
"-",
"24",
")",
":",
"# Find the high-resolution frame_times",
"n",
"=",
"frame_times",
".",
"size",
"min_onset",
"=",
"float",
"(",
"min_onset",
")",
"n_hr",
"=",
"(",
"(",
"n",
"-",
"1",
")",
"*",
"1.",
"/",
"(",
"frame_times",
".",
"max",
"(",
")",
"-",
"frame_times",
".",
"min",
"(",
")",
")",
"*",
"(",
"frame_times",
".",
"max",
"(",
")",
"*",
"(",
"1",
"+",
"1.",
"/",
"(",
"n",
"-",
"1",
")",
")",
"-",
"frame_times",
".",
"min",
"(",
")",
"-",
"min_onset",
")",
"*",
"oversampling",
")",
"+",
"1",
"hr_frame_times",
"=",
"np",
".",
"linspace",
"(",
"frame_times",
".",
"min",
"(",
")",
"+",
"min_onset",
",",
"frame_times",
".",
"max",
"(",
")",
"*",
"(",
"1",
"+",
"1.",
"/",
"(",
"n",
"-",
"1",
")",
")",
",",
"np",
".",
"rint",
"(",
"n_hr",
")",
".",
"astype",
"(",
"np",
".",
"int",
")",
")",
"# Get the condition information",
"onsets",
",",
"durations",
",",
"values",
"=",
"tuple",
"(",
"map",
"(",
"np",
".",
"asanyarray",
",",
"exp_condition",
")",
")",
"if",
"(",
"onsets",
"<",
"frame_times",
"[",
"0",
"]",
"+",
"min_onset",
")",
".",
"any",
"(",
")",
":",
"warnings",
".",
"warn",
"(",
"(",
"'Some stimulus onsets are earlier than %s in the'",
"' experiment and are thus not considered in the model'",
"%",
"(",
"frame_times",
"[",
"0",
"]",
"+",
"min_onset",
")",
")",
",",
"UserWarning",
")",
"# Set up the regressor timecourse",
"tmax",
"=",
"len",
"(",
"hr_frame_times",
")",
"regressor",
"=",
"np",
".",
"zeros_like",
"(",
"hr_frame_times",
")",
".",
"astype",
"(",
"np",
".",
"float",
")",
"t_onset",
"=",
"np",
".",
"minimum",
"(",
"np",
".",
"searchsorted",
"(",
"hr_frame_times",
",",
"onsets",
")",
",",
"tmax",
"-",
"1",
")",
"regressor",
"[",
"t_onset",
"]",
"+=",
"values",
"t_offset",
"=",
"np",
".",
"minimum",
"(",
"np",
".",
"searchsorted",
"(",
"hr_frame_times",
",",
"onsets",
"+",
"durations",
")",
",",
"tmax",
"-",
"1",
")",
"# Handle the case where duration is 0 by offsetting at t + 1",
"for",
"i",
",",
"t",
"in",
"enumerate",
"(",
"t_offset",
")",
":",
"if",
"t",
"<",
"(",
"tmax",
"-",
"1",
")",
"and",
"t",
"==",
"t_onset",
"[",
"i",
"]",
":",
"t_offset",
"[",
"i",
"]",
"+=",
"1",
"regressor",
"[",
"t_offset",
"]",
"-=",
"values",
"regressor",
"=",
"np",
".",
"cumsum",
"(",
"regressor",
")",
"return",
"regressor",
",",
"hr_frame_times"
] | Make a possibly oversampled event regressor from condition information.
Parameters
----------
exp_condition : arraylike of shape (3, n_events)
yields description of events for this condition as a
(onsets, durations, amplitudes) triplet
frame_times : array of shape(n_scans)
sample time points
over_sampling : int, optional
factor for oversampling event regressor
min_onset : float, optional
minimal onset relative to frame_times[0] (in seconds)
events that start before frame_times[0] + min_onset are not considered
Returns
-------
regressor: array of shape(over_sampling * n_scans)
possibly oversampled event regressor
hr_frame_times : array of shape(over_sampling * n_scans)
time points used for regressor sampling | [
"Make",
"a",
"possibly",
"oversampled",
"event",
"regressor",
"from",
"condition",
"information",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/hrf.py#L233-L295 | train |
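A hedged usage sketch for _sample_condition (assumes the module imports as recorded above):

import numpy as np
from bids.analysis.hrf import _sample_condition

frame_times = np.arange(0, 30, 2.0)                     # 15 scans, TR = 2 s
exp_condition = ([3.0, 11.0], [4.0, 0.0], [1.0, 2.0])   # onsets, durations, amplitudes
regressor, hr_times = _sample_condition(exp_condition, frame_times, oversampling=50)
# After the cumulative sum, the oversampled regressor is a box of height 1 over [3, 7) s;
# the zero-duration event at 11 s becomes a single high-resolution sample of height 2.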
bids-standard/pybids | bids/analysis/hrf.py | _resample_regressor | def _resample_regressor(hr_regressor, hr_frame_times, frame_times):
""" this function sub-samples the regressors at frame times
Parameters
----------
hr_regressor : array of shape(n_samples),
the regressor time course sampled at high temporal resolution
hr_frame_times : array of shape(n_samples),
the corresponding time stamps
frame_times: array of shape(n_scans),
the desired time stamps
Returns
-------
regressor: array of shape(n_scans)
the resampled regressor
"""
from scipy.interpolate import interp1d
f = interp1d(hr_frame_times, hr_regressor)
return f(frame_times).T | python | def _resample_regressor(hr_regressor, hr_frame_times, frame_times):
""" this function sub-samples the regressors at frame times
Parameters
----------
hr_regressor : array of shape(n_samples),
the regressor time course sampled at high temporal resolution
hr_frame_times : array of shape(n_samples),
the corresponding time stamps
frame_times: array of shape(n_scans),
the desired time stamps
Returns
-------
regressor: array of shape(n_scans)
the resampled regressor
"""
from scipy.interpolate import interp1d
f = interp1d(hr_frame_times, hr_regressor)
return f(frame_times).T | [
"def",
"_resample_regressor",
"(",
"hr_regressor",
",",
"hr_frame_times",
",",
"frame_times",
")",
":",
"from",
"scipy",
".",
"interpolate",
"import",
"interp1d",
"f",
"=",
"interp1d",
"(",
"hr_frame_times",
",",
"hr_regressor",
")",
"return",
"f",
"(",
"frame_times",
")",
".",
"T"
] | this function sub-samples the regressors at frame times
Parameters
----------
hr_regressor : array of shape(n_samples),
the regressor time course sampled at high temporal resolution
hr_frame_times : array of shape(n_samples),
the corresponding time stamps
frame_times: array of shape(n_scans),
the desired time stamps
Returns
-------
regressor: array of shape(n_scans)
the resampled regressor | [
"this",
"function",
"sub",
"-",
"samples",
"the",
"regressors",
"at",
"frame",
"times"
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/hrf.py#L298-L319 | train |
bids-standard/pybids | bids/analysis/hrf.py | _orthogonalize | def _orthogonalize(X):
""" Orthogonalize every column of design `X` w.r.t preceding columns
Parameters
----------
X: array of shape(n, p)
the data to be orthogonalized
Returns
-------
X: array of shape(n, p)
the data after orthogonalization
Notes
-----
X is changed in place. The columns are not normalized
"""
if X.size == X.shape[0]:
return X
from scipy.linalg import pinv, norm
for i in range(1, X.shape[1]):
X[:, i] -= np.dot(np.dot(X[:, i], X[:, :i]), pinv(X[:, :i]))
# X[:, i] /= norm(X[:, i])
return X | python | def _orthogonalize(X):
""" Orthogonalize every column of design `X` w.r.t preceding columns
Parameters
----------
X: array of shape(n, p)
the data to be orthogonalized
Returns
-------
X: array of shape(n, p)
the data after orthogonalization
Notes
-----
X is changed in place. The columns are not normalized
"""
if X.size == X.shape[0]:
return X
from scipy.linalg import pinv, norm
for i in range(1, X.shape[1]):
X[:, i] -= np.dot(np.dot(X[:, i], X[:, :i]), pinv(X[:, :i]))
# X[:, i] /= norm(X[:, i])
return X | [
"def",
"_orthogonalize",
"(",
"X",
")",
":",
"if",
"X",
".",
"size",
"==",
"X",
".",
"shape",
"[",
"0",
"]",
":",
"return",
"X",
"from",
"scipy",
".",
"linalg",
"import",
"pinv",
",",
"norm",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"X",
".",
"shape",
"[",
"1",
"]",
")",
":",
"X",
"[",
":",
",",
"i",
"]",
"-=",
"np",
".",
"dot",
"(",
"np",
".",
"dot",
"(",
"X",
"[",
":",
",",
"i",
"]",
",",
"X",
"[",
":",
",",
":",
"i",
"]",
")",
",",
"pinv",
"(",
"X",
"[",
":",
",",
":",
"i",
"]",
")",
")",
"# X[:, i] /= norm(X[:, i])",
"return",
"X"
] | Orthogonalize every column of design `X` w.r.t preceding columns
Parameters
----------
X: array of shape(n, p)
the data to be orthogonalized
Returns
-------
X: array of shape(n, p)
the data after orthogonalization
Notes
-----
X is changed in place. The columns are not normalized | [
"Orthogonalize",
"every",
"column",
"of",
"design",
"X",
"w",
".",
"r",
".",
"t",
"preceding",
"columns"
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/hrf.py#L322-L345 | train |
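A quick numeric check of _orthogonalize (the operation is in place and leaves column 0 untouched):

import numpy as np
from bids.analysis.hrf import _orthogonalize

X = np.random.RandomState(0).randn(20, 3)
X = _orthogonalize(X)
G = X.T.dot(X)
print(np.allclose(G - np.diag(np.diag(G)), 0))   # off-diagonal entries vanish: columns are mutually orthogonal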
bids-standard/pybids | bids/analysis/hrf.py | _regressor_names | def _regressor_names(con_name, hrf_model, fir_delays=None):
""" Returns a list of regressor names, computed from con-name and hrf type
Parameters
----------
con_name: string
identifier of the condition
hrf_model: string or None,
hrf model chosen
fir_delays: 1D array_like, optional,
Delays used in case of an FIR model
Returns
-------
names: list of strings,
regressor names
"""
if hrf_model in ['glover', 'spm', None]:
return [con_name]
elif hrf_model in ["glover + derivative", 'spm + derivative']:
return [con_name, con_name + "_derivative"]
elif hrf_model in ['spm + derivative + dispersion',
'glover + derivative + dispersion']:
return [con_name, con_name + "_derivative", con_name + "_dispersion"]
elif hrf_model == 'fir':
return [con_name + "_delay_%d" % i for i in fir_delays] | python | def _regressor_names(con_name, hrf_model, fir_delays=None):
""" Returns a list of regressor names, computed from con-name and hrf type
Parameters
----------
con_name: string
identifier of the condition
hrf_model: string or None,
hrf model chosen
fir_delays: 1D array_like, optional,
Delays used in case of an FIR model
Returns
-------
names: list of strings,
regressor names
"""
if hrf_model in ['glover', 'spm', None]:
return [con_name]
elif hrf_model in ["glover + derivative", 'spm + derivative']:
return [con_name, con_name + "_derivative"]
elif hrf_model in ['spm + derivative + dispersion',
'glover + derivative + dispersion']:
return [con_name, con_name + "_derivative", con_name + "_dispersion"]
elif hrf_model == 'fir':
return [con_name + "_delay_%d" % i for i in fir_delays] | [
"def",
"_regressor_names",
"(",
"con_name",
",",
"hrf_model",
",",
"fir_delays",
"=",
"None",
")",
":",
"if",
"hrf_model",
"in",
"[",
"'glover'",
",",
"'spm'",
",",
"None",
"]",
":",
"return",
"[",
"con_name",
"]",
"elif",
"hrf_model",
"in",
"[",
"\"glover + derivative\"",
",",
"'spm + derivative'",
"]",
":",
"return",
"[",
"con_name",
",",
"con_name",
"+",
"\"_derivative\"",
"]",
"elif",
"hrf_model",
"in",
"[",
"'spm + derivative + dispersion'",
",",
"'glover + derivative + dispersion'",
"]",
":",
"return",
"[",
"con_name",
",",
"con_name",
"+",
"\"_derivative\"",
",",
"con_name",
"+",
"\"_dispersion\"",
"]",
"elif",
"hrf_model",
"==",
"'fir'",
":",
"return",
"[",
"con_name",
"+",
"\"_delay_%d\"",
"%",
"i",
"for",
"i",
"in",
"fir_delays",
"]"
] | Returns a list of regressor names, computed from con-name and hrf type
Parameters
----------
con_name: string
identifier of the condition
hrf_model: string or None,
hrf model chosen
fir_delays: 1D array_like, optional,
Delays used in case of an FIR model
Returns
-------
names: list of strings,
regressor names | [
"Returns",
"a",
"list",
"of",
"regressor",
"names",
"computed",
"from",
"con",
"-",
"name",
"and",
"hrf",
"type"
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/hrf.py#L348-L375 | train |
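A small illustration of the naming scheme implemented by _regressor_names above; the expected outputs in the comments follow directly from the code in this entry:
from bids.analysis.hrf import _regressor_names  # same module as the entry above

print(_regressor_names('faces', 'glover'))                      # ['faces']
print(_regressor_names('faces', 'spm + derivative'))            # ['faces', 'faces_derivative']
print(_regressor_names('faces', 'fir', fir_delays=[0, 1, 2]))   # ['faces_delay_0', 'faces_delay_1', 'faces_delay_2']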
bids-standard/pybids | bids/analysis/hrf.py | _hrf_kernel | def _hrf_kernel(hrf_model, tr, oversampling=50, fir_delays=None):
""" Given the specification of the hemodynamic model and time parameters,
return the list of matching kernels
Parameters
----------
hrf_model : string or None,
identifier of the hrf model
tr : float
the repetition time in seconds
oversampling : int, optional
temporal oversampling factor to have a smooth hrf
fir_delays : list of floats,
list of delays for finite impulse response models
Returns
-------
hkernel : list of arrays
samples of the hrf (the number depends on the hrf_model used)
"""
acceptable_hrfs = [
'spm', 'spm + derivative', 'spm + derivative + dispersion', 'fir',
'glover', 'glover + derivative', 'glover + derivative + dispersion',
None]
if hrf_model == 'spm':
hkernel = [spm_hrf(tr, oversampling)]
elif hrf_model == 'spm + derivative':
hkernel = [spm_hrf(tr, oversampling),
spm_time_derivative(tr, oversampling)]
elif hrf_model == 'spm + derivative + dispersion':
hkernel = [spm_hrf(tr, oversampling),
spm_time_derivative(tr, oversampling),
spm_dispersion_derivative(tr, oversampling)]
elif hrf_model == 'glover':
hkernel = [glover_hrf(tr, oversampling)]
elif hrf_model == 'glover + derivative':
hkernel = [glover_hrf(tr, oversampling),
glover_time_derivative(tr, oversampling)]
elif hrf_model == 'glover + derivative + dispersion':
hkernel = [glover_hrf(tr, oversampling),
glover_time_derivative(tr, oversampling),
glover_dispersion_derivative(tr, oversampling)]
elif hrf_model == 'fir':
hkernel = [np.hstack((np.zeros(f * oversampling),
np.ones(oversampling)))
for f in fir_delays]
elif hrf_model is None:
hkernel = [np.hstack((1, np.zeros(oversampling - 1)))]
else:
raise ValueError('"{0}" is not a known hrf model. Use one of {1}'.
format(hrf_model, acceptable_hrfs))
return hkernel | python | def _hrf_kernel(hrf_model, tr, oversampling=50, fir_delays=None):
""" Given the specification of the hemodynamic model and time parameters,
return the list of matching kernels
Parameters
----------
hrf_model : string or None,
identifier of the hrf model
tr : float
the repetition time in seconds
oversampling : int, optional
temporal oversampling factor to have a smooth hrf
fir_delays : list of floats,
list of delays for finite impulse response models
Returns
-------
hkernel : list of arrays
samples of the hrf (the number depends on the hrf_model used)
"""
acceptable_hrfs = [
'spm', 'spm + derivative', 'spm + derivative + dispersion', 'fir',
'glover', 'glover + derivative', 'glover + derivative + dispersion',
None]
if hrf_model == 'spm':
hkernel = [spm_hrf(tr, oversampling)]
elif hrf_model == 'spm + derivative':
hkernel = [spm_hrf(tr, oversampling),
spm_time_derivative(tr, oversampling)]
elif hrf_model == 'spm + derivative + dispersion':
hkernel = [spm_hrf(tr, oversampling),
spm_time_derivative(tr, oversampling),
spm_dispersion_derivative(tr, oversampling)]
elif hrf_model == 'glover':
hkernel = [glover_hrf(tr, oversampling)]
elif hrf_model == 'glover + derivative':
hkernel = [glover_hrf(tr, oversampling),
glover_time_derivative(tr, oversampling)]
elif hrf_model == 'glover + derivative + dispersion':
hkernel = [glover_hrf(tr, oversampling),
glover_time_derivative(tr, oversampling),
glover_dispersion_derivative(tr, oversampling)]
elif hrf_model == 'fir':
hkernel = [np.hstack((np.zeros(f * oversampling),
np.ones(oversampling)))
for f in fir_delays]
elif hrf_model is None:
hkernel = [np.hstack((1, np.zeros(oversampling - 1)))]
else:
raise ValueError('"{0}" is not a known hrf model. Use one of {1}'.
format(hrf_model, acceptable_hrfs))
return hkernel | [
"def",
"_hrf_kernel",
"(",
"hrf_model",
",",
"tr",
",",
"oversampling",
"=",
"50",
",",
"fir_delays",
"=",
"None",
")",
":",
"acceptable_hrfs",
"=",
"[",
"'spm'",
",",
"'spm + derivative'",
",",
"'spm + derivative + dispersion'",
",",
"'fir'",
",",
"'glover'",
",",
"'glover + derivative'",
",",
"'glover + derivative + dispersion'",
",",
"None",
"]",
"if",
"hrf_model",
"==",
"'spm'",
":",
"hkernel",
"=",
"[",
"spm_hrf",
"(",
"tr",
",",
"oversampling",
")",
"]",
"elif",
"hrf_model",
"==",
"'spm + derivative'",
":",
"hkernel",
"=",
"[",
"spm_hrf",
"(",
"tr",
",",
"oversampling",
")",
",",
"spm_time_derivative",
"(",
"tr",
",",
"oversampling",
")",
"]",
"elif",
"hrf_model",
"==",
"'spm + derivative + dispersion'",
":",
"hkernel",
"=",
"[",
"spm_hrf",
"(",
"tr",
",",
"oversampling",
")",
",",
"spm_time_derivative",
"(",
"tr",
",",
"oversampling",
")",
",",
"spm_dispersion_derivative",
"(",
"tr",
",",
"oversampling",
")",
"]",
"elif",
"hrf_model",
"==",
"'glover'",
":",
"hkernel",
"=",
"[",
"glover_hrf",
"(",
"tr",
",",
"oversampling",
")",
"]",
"elif",
"hrf_model",
"==",
"'glover + derivative'",
":",
"hkernel",
"=",
"[",
"glover_hrf",
"(",
"tr",
",",
"oversampling",
")",
",",
"glover_time_derivative",
"(",
"tr",
",",
"oversampling",
")",
"]",
"elif",
"hrf_model",
"==",
"'glover + derivative + dispersion'",
":",
"hkernel",
"=",
"[",
"glover_hrf",
"(",
"tr",
",",
"oversampling",
")",
",",
"glover_time_derivative",
"(",
"tr",
",",
"oversampling",
")",
",",
"glover_dispersion_derivative",
"(",
"tr",
",",
"oversampling",
")",
"]",
"elif",
"hrf_model",
"==",
"'fir'",
":",
"hkernel",
"=",
"[",
"np",
".",
"hstack",
"(",
"(",
"np",
".",
"zeros",
"(",
"f",
"*",
"oversampling",
")",
",",
"np",
".",
"ones",
"(",
"oversampling",
")",
")",
")",
"for",
"f",
"in",
"fir_delays",
"]",
"elif",
"hrf_model",
"is",
"None",
":",
"hkernel",
"=",
"[",
"np",
".",
"hstack",
"(",
"(",
"1",
",",
"np",
".",
"zeros",
"(",
"oversampling",
"-",
"1",
")",
")",
")",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'\"{0}\" is not a known hrf model. Use one of {1}'",
".",
"format",
"(",
"hrf_model",
",",
"acceptable_hrfs",
")",
")",
"return",
"hkernel"
] | Given the specification of the hemodynamic model and time parameters,
return the list of matching kernels
Parameters
----------
hrf_model : string or None,
identifier of the hrf model
tr : float
the repetition time in seconds
oversampling : int, optional
temporal oversampling factor to have a smooth hrf
fir_delays : list of floats,
list of delays for finite impulse response models
Returns
-------
hkernel : list of arrays
samples of the hrf (the number depends on the hrf_model used) | [
"Given",
"the",
"specification",
"of",
"the",
"hemodynamic",
"model",
"and",
"time",
"parameters",
"return",
"the",
"list",
"of",
"matching",
"kernels"
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/hrf.py#L378-L432 | train |
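A short sketch of calling _hrf_kernel above (the repetition time of 2 s is illustrative; the default oversampling of 50 is used):
from bids.analysis.hrf import _hrf_kernel  # same module as the entry above

kernels = _hrf_kernel('glover + derivative', tr=2.0)
print(len(kernels))   # 2: the Glover HRF plus its time derivative, each a sampled kernel array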
bids-standard/pybids | bids/analysis/hrf.py | compute_regressor | def compute_regressor(exp_condition, hrf_model, frame_times, con_id='cond',
oversampling=50, fir_delays=None, min_onset=-24):
""" This is the main function to convolve regressors with hrf model
Parameters
----------
exp_condition : array-like of shape (3, n_events)
yields description of events for this condition as a
(onsets, durations, amplitudes) triplet
hrf_model : {'spm', 'spm + derivative', 'spm + derivative + dispersion',
'glover', 'glover + derivative', 'fir', None}
Name of the hrf model to be used
frame_times : array of shape (n_scans)
the desired sampling times
con_id : string
optional identifier of the condition
oversampling : int, optional
oversampling factor to perform the convolution
fir_delays : 1D-array-like, optional
delays (in seconds) used in case of a finite impulse response model
min_onset : float, optional
minimal onset relative to frame_times[0] (in seconds)
events that start before frame_times[0] + min_onset are not considered
Returns
-------
computed_regressors: array of shape(n_scans, n_reg)
computed regressors sampled at frame times
reg_names: list of strings
corresponding regressor names
Notes
-----
The different hemodynamic models can be understood as follows:
'spm': this is the hrf model used in SPM
'spm + derivative': SPM model plus its time derivative (2 regressors)
'spm + time + dispersion': idem, plus dispersion derivative (3 regressors)
'glover': this one corresponds to the Glover hrf
'glover + derivative': the Glover hrf + time derivative (2 regressors)
'glover + derivative + dispersion': idem + dispersion derivative
(3 regressors)
'fir': finite impulse response basis, a set of delayed dirac models
with arbitrary length. This one currently assumes regularly spaced
frame times (i.e. fixed time of repetition).
It is expected that spm standard and Glover model would not yield
large differences in most cases.
In case of glover and spm models, the derived regressors are
orthogonalized wrt the main one.
"""
# this is the average tr in this session, not necessarily the true tr
tr = float(frame_times.max()) / (np.size(frame_times) - 1)
# 1. create the high temporal resolution regressor
hr_regressor, hr_frame_times = _sample_condition(
exp_condition, frame_times, oversampling, min_onset)
# 2. create the hrf model(s)
hkernel = _hrf_kernel(hrf_model, tr, oversampling, fir_delays)
# 3. convolve the regressor and hrf, and downsample the regressor
conv_reg = np.array([np.convolve(hr_regressor, h)[:hr_regressor.size]
for h in hkernel])
# 4. temporally resample the regressors
computed_regressors = _resample_regressor(
conv_reg, hr_frame_times, frame_times)
# 5. orthogonalize the regressors
if hrf_model != 'fir':
computed_regressors = _orthogonalize(computed_regressors)
# 6 generate regressor names
reg_names = _regressor_names(con_id, hrf_model, fir_delays=fir_delays)
return computed_regressors, reg_names | python | def compute_regressor(exp_condition, hrf_model, frame_times, con_id='cond',
oversampling=50, fir_delays=None, min_onset=-24):
""" This is the main function to convolve regressors with hrf model
Parameters
----------
exp_condition : array-like of shape (3, n_events)
yields description of events for this condition as a
(onsets, durations, amplitudes) triplet
hrf_model : {'spm', 'spm + derivative', 'spm + derivative + dispersion',
'glover', 'glover + derivative', 'fir', None}
Name of the hrf model to be used
frame_times : array of shape (n_scans)
the desired sampling times
con_id : string
optional identifier of the condition
oversampling : int, optional
oversampling factor to perform the convolution
fir_delays : 1D-array-like, optional
delays (in seconds) used in case of a finite impulse response model
min_onset : float, optional
minimal onset relative to frame_times[0] (in seconds)
events that start before frame_times[0] + min_onset are not considered
Returns
-------
computed_regressors: array of shape(n_scans, n_reg)
computed regressors sampled at frame times
reg_names: list of strings
corresponding regressor names
Notes
-----
The different hemodynamic models can be understood as follows:
'spm': this is the hrf model used in SPM
'spm + derivative': SPM model plus its time derivative (2 regressors)
'spm + time + dispersion': idem, plus dispersion derivative (3 regressors)
'glover': this one corresponds to the Glover hrf
'glover + derivative': the Glover hrf + time derivative (2 regressors)
'glover + derivative + dispersion': idem + dispersion derivative
(3 regressors)
'fir': finite impulse response basis, a set of delayed dirac models
with arbitrary length. This one currently assumes regularly spaced
frame times (i.e. fixed time of repetition).
It is expected that spm standard and Glover model would not yield
large differences in most cases.
In case of glover and spm models, the derived regressors are
orthogonalized wrt the main one.
"""
# this is the average tr in this session, not necessarily the true tr
tr = float(frame_times.max()) / (np.size(frame_times) - 1)
# 1. create the high temporal resolution regressor
hr_regressor, hr_frame_times = _sample_condition(
exp_condition, frame_times, oversampling, min_onset)
# 2. create the hrf model(s)
hkernel = _hrf_kernel(hrf_model, tr, oversampling, fir_delays)
# 3. convolve the regressor and hrf, and downsample the regressor
conv_reg = np.array([np.convolve(hr_regressor, h)[:hr_regressor.size]
for h in hkernel])
# 4. temporally resample the regressors
computed_regressors = _resample_regressor(
conv_reg, hr_frame_times, frame_times)
# 5. orthogonalize the regressors
if hrf_model != 'fir':
computed_regressors = _orthogonalize(computed_regressors)
# 6 generate regressor names
reg_names = _regressor_names(con_id, hrf_model, fir_delays=fir_delays)
return computed_regressors, reg_names | [
"def",
"compute_regressor",
"(",
"exp_condition",
",",
"hrf_model",
",",
"frame_times",
",",
"con_id",
"=",
"'cond'",
",",
"oversampling",
"=",
"50",
",",
"fir_delays",
"=",
"None",
",",
"min_onset",
"=",
"-",
"24",
")",
":",
"# this is the average tr in this session, not necessarily the true tr",
"tr",
"=",
"float",
"(",
"frame_times",
".",
"max",
"(",
")",
")",
"/",
"(",
"np",
".",
"size",
"(",
"frame_times",
")",
"-",
"1",
")",
"# 1. create the high temporal resolution regressor",
"hr_regressor",
",",
"hr_frame_times",
"=",
"_sample_condition",
"(",
"exp_condition",
",",
"frame_times",
",",
"oversampling",
",",
"min_onset",
")",
"# 2. create the hrf model(s)",
"hkernel",
"=",
"_hrf_kernel",
"(",
"hrf_model",
",",
"tr",
",",
"oversampling",
",",
"fir_delays",
")",
"# 3. convolve the regressor and hrf, and downsample the regressor",
"conv_reg",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"convolve",
"(",
"hr_regressor",
",",
"h",
")",
"[",
":",
"hr_regressor",
".",
"size",
"]",
"for",
"h",
"in",
"hkernel",
"]",
")",
"# 4. temporally resample the regressors",
"computed_regressors",
"=",
"_resample_regressor",
"(",
"conv_reg",
",",
"hr_frame_times",
",",
"frame_times",
")",
"# 5. ortogonalize the regressors",
"if",
"hrf_model",
"!=",
"'fir'",
":",
"computed_regressors",
"=",
"_orthogonalize",
"(",
"computed_regressors",
")",
"# 6 generate regressor names",
"reg_names",
"=",
"_regressor_names",
"(",
"con_id",
",",
"hrf_model",
",",
"fir_delays",
"=",
"fir_delays",
")",
"return",
"computed_regressors",
",",
"reg_names"
] | This is the main function to convolve regressors with hrf model
Parameters
----------
exp_condition : array-like of shape (3, n_events)
yields description of events for this condition as a
(onsets, durations, amplitudes) triplet
hrf_model : {'spm', 'spm + derivative', 'spm + derivative + dispersion',
'glover', 'glover + derivative', 'fir', None}
Name of the hrf model to be used
frame_times : array of shape (n_scans)
the desired sampling times
con_id : string
optional identifier of the condition
oversampling : int, optional
oversampling factor to perform the convolution
fir_delays : 1D-array-like, optional
delays (in seconds) used in case of a finite impulse response model
min_onset : float, optional
minimal onset relative to frame_times[0] (in seconds)
events that start before frame_times[0] + min_onset are not considered
Returns
-------
computed_regressors: array of shape(n_scans, n_reg)
computed regressors sampled at frame times
reg_names: list of strings
corresponding regressor names
Notes
-----
The different hemodynamic models can be understood as follows:
'spm': this is the hrf model used in SPM
'spm + derivative': SPM model plus its time derivative (2 regressors)
'spm + time + dispersion': idem, plus dispersion derivative (3 regressors)
'glover': this one corresponds to the Glover hrf
'glover + derivative': the Glover hrf + time derivative (2 regressors)
'glover + derivative + dispersion': idem + dispersion derivative
(3 regressors)
'fir': finite impulse response basis, a set of delayed dirac models
with arbitrary length. This one currently assumes regularly spaced
frame times (i.e. fixed time of repetition).
It is expected that spm standard and Glover model would not yield
large differences in most cases.
In case of glover and spm models, the derived regressors are
orthogonalized wrt the main one. | [
"This",
"is",
"the",
"main",
"function",
"to",
"convolve",
"regressors",
"with",
"hrf",
"model"
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/hrf.py#L435-L516 | train |
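A self-contained sketch of compute_regressor from the entry above; the event timings and frame times are made up for illustration:
import numpy as np
from bids.analysis.hrf import compute_regressor  # module shown in the entry above

frame_times = np.arange(0, 100, 2.0)        # 50 scans at a 2 s repetition time
onsets = np.array([10.0, 30.0, 50.0])       # event onsets in seconds
durations = np.array([1.0, 1.0, 1.0])
amplitudes = np.array([1.0, 1.0, 1.0])
exp_condition = np.vstack((onsets, durations, amplitudes))   # the (3, n_events) layout the docstring asks for

reg, names = compute_regressor(exp_condition, 'glover', frame_times, con_id='faces')
print(reg.shape)   # (50, 1): one Glover-convolved regressor sampled at the frame times
print(names)       # ['faces']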
bids-standard/pybids | bids/utils.py | matches_entities | def matches_entities(obj, entities, strict=False):
''' Checks whether an object's entities match the input. '''
if strict and set(obj.entities.keys()) != set(entities.keys()):
return False
comm_ents = list(set(obj.entities.keys()) & set(entities.keys()))
for k in comm_ents:
current = obj.entities[k]
target = entities[k]
if isinstance(target, (list, tuple)):
if current not in target:
return False
elif current != target:
return False
return True | python | def matches_entities(obj, entities, strict=False):
''' Checks whether an object's entities match the input. '''
if strict and set(obj.entities.keys()) != set(entities.keys()):
return False
comm_ents = list(set(obj.entities.keys()) & set(entities.keys()))
for k in comm_ents:
current = obj.entities[k]
target = entities[k]
if isinstance(target, (list, tuple)):
if current not in target:
return False
elif current != target:
return False
return True | [
"def",
"matches_entities",
"(",
"obj",
",",
"entities",
",",
"strict",
"=",
"False",
")",
":",
"if",
"strict",
"and",
"set",
"(",
"obj",
".",
"entities",
".",
"keys",
"(",
")",
")",
"!=",
"set",
"(",
"entities",
".",
"keys",
"(",
")",
")",
":",
"return",
"False",
"comm_ents",
"=",
"list",
"(",
"set",
"(",
"obj",
".",
"entities",
".",
"keys",
"(",
")",
")",
"&",
"set",
"(",
"entities",
".",
"keys",
"(",
")",
")",
")",
"for",
"k",
"in",
"comm_ents",
":",
"current",
"=",
"obj",
".",
"entities",
"[",
"k",
"]",
"target",
"=",
"entities",
"[",
"k",
"]",
"if",
"isinstance",
"(",
"target",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"if",
"current",
"not",
"in",
"target",
":",
"return",
"False",
"elif",
"current",
"!=",
"target",
":",
"return",
"False",
"return",
"True"
] | Checks whether an object's entities match the input. | [
"Checks",
"whether",
"an",
"object",
"s",
"entities",
"match",
"the",
"input",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/utils.py#L12-L26 | train |
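A tiny illustration of matches_entities above, using a stand-in object that only needs an entities attribute:
from collections import namedtuple
from bids.utils import matches_entities  # module shown in the entry above

Obj = namedtuple('Obj', 'entities')
f = Obj(entities={'subject': '01', 'task': 'rest', 'run': 1})

print(matches_entities(f, {'subject': '01'}))                # True: the shared key matches
print(matches_entities(f, {'subject': ['01', '02']}))        # True: a list target means "any of these"
print(matches_entities(f, {'task': 'nback'}))                # False: value mismatch on a shared key
print(matches_entities(f, {'subject': '01'}, strict=True))   # False: strict mode requires identical key sets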
bids-standard/pybids | bids/utils.py | check_path_matches_patterns | def check_path_matches_patterns(path, patterns):
''' Check if the path matches at least one of the provided patterns. '''
path = os.path.abspath(path)
for patt in patterns:
if isinstance(patt, six.string_types):
if path == patt:
return True
elif patt.search(path):
return True
return False | python | def check_path_matches_patterns(path, patterns):
''' Check if the path matches at least one of the provided patterns. '''
path = os.path.abspath(path)
for patt in patterns:
if isinstance(patt, six.string_types):
if path == patt:
return True
elif patt.search(path):
return True
return False | [
"def",
"check_path_matches_patterns",
"(",
"path",
",",
"patterns",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
"for",
"patt",
"in",
"patterns",
":",
"if",
"isinstance",
"(",
"patt",
",",
"six",
".",
"string_types",
")",
":",
"if",
"path",
"==",
"patt",
":",
"return",
"True",
"elif",
"patt",
".",
"search",
"(",
"path",
")",
":",
"return",
"True",
"return",
"False"
] | Check if the path matches at least one of the provided patterns. | [
"Check",
"if",
"the",
"path",
"matches",
"at",
"least",
"one",
"of",
"the",
"provided",
"patterns",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/utils.py#L91-L100 | train |
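An illustrative call to check_path_matches_patterns above; patterns may be literal absolute paths or pre-compiled regular expressions (paths here assume a POSIX filesystem):
import re
from bids.utils import check_path_matches_patterns  # module shown in the entry above

patterns = [re.compile(r'derivatives'), '/data/bids/dataset_description.json']

print(check_path_matches_patterns('/data/bids/derivatives/fmriprep', patterns))   # True: regex match
print(check_path_matches_patterns('/data/bids/sub-01/anat', patterns))            # False: no pattern applies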
bids-standard/pybids | bids/layout/core.py | Entity.count | def count(self, files=False):
""" Returns a count of unique values or files.
Args:
files (bool): When True, counts all files mapped to the Entity.
When False, counts all unique values.
Returns: an int.
"""
return len(self.files) if files else len(self.unique()) | python | def count(self, files=False):
""" Returns a count of unique values or files.
Args:
files (bool): When True, counts all files mapped to the Entity.
When False, counts all unique values.
Returns: an int.
"""
return len(self.files) if files else len(self.unique()) | [
"def",
"count",
"(",
"self",
",",
"files",
"=",
"False",
")",
":",
"return",
"len",
"(",
"self",
".",
"files",
")",
"if",
"files",
"else",
"len",
"(",
"self",
".",
"unique",
"(",
")",
")"
] | Returns a count of unique values or files.
Args:
files (bool): When True, counts all files mapped to the Entity.
When False, counts all unique values.
Returns: an int. | [
"Returns",
"a",
"count",
"of",
"unique",
"values",
"or",
"files",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/core.py#L147-L155 | train |
bids-standard/pybids | bids/reports/parsing.py | general_acquisition_info | def general_acquisition_info(metadata):
"""
General sentence on data acquisition. Should be first sentence in MRI data
acquisition section.
Parameters
----------
metadata : :obj:`dict`
The metadata for the dataset.
Returns
-------
out_str : :obj:`str`
Output string with scanner information.
"""
out_str = ('MR data were acquired using a {tesla}-Tesla {manu} {model} '
'MRI scanner.')
out_str = out_str.format(tesla=metadata.get('MagneticFieldStrength',
'UNKNOWN'),
manu=metadata.get('Manufacturer', 'MANUFACTURER'),
model=metadata.get('ManufacturersModelName',
'MODEL'))
return out_str | python | def general_acquisition_info(metadata):
"""
General sentence on data acquisition. Should be first sentence in MRI data
acquisition section.
Parameters
----------
metadata : :obj:`dict`
The metadata for the dataset.
Returns
-------
out_str : :obj:`str`
Output string with scanner information.
"""
out_str = ('MR data were acquired using a {tesla}-Tesla {manu} {model} '
'MRI scanner.')
out_str = out_str.format(tesla=metadata.get('MagneticFieldStrength',
'UNKNOWN'),
manu=metadata.get('Manufacturer', 'MANUFACTURER'),
model=metadata.get('ManufacturersModelName',
'MODEL'))
return out_str | [
"def",
"general_acquisition_info",
"(",
"metadata",
")",
":",
"out_str",
"=",
"(",
"'MR data were acquired using a {tesla}-Tesla {manu} {model} '",
"'MRI scanner.'",
")",
"out_str",
"=",
"out_str",
".",
"format",
"(",
"tesla",
"=",
"metadata",
".",
"get",
"(",
"'MagneticFieldStrength'",
",",
"'UNKNOWN'",
")",
",",
"manu",
"=",
"metadata",
".",
"get",
"(",
"'Manufacturer'",
",",
"'MANUFACTURER'",
")",
",",
"model",
"=",
"metadata",
".",
"get",
"(",
"'ManufacturersModelName'",
",",
"'MODEL'",
")",
")",
"return",
"out_str"
] | General sentence on data acquisition. Should be first sentence in MRI data
acquisition section.
Parameters
----------
metadata : :obj:`dict`
The metadata for the dataset.
Returns
-------
out_str : :obj:`str`
Output string with scanner information. | [
"General",
"sentence",
"on",
"data",
"acquisition",
".",
"Should",
"be",
"first",
"sentence",
"in",
"MRI",
"data",
"acquisition",
"section",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/reports/parsing.py#L22-L44 | train |
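A quick illustration of general_acquisition_info above with a hand-written metadata dictionary (values are illustrative):
from bids.reports.parsing import general_acquisition_info  # module shown in the entry above

metadata = {'MagneticFieldStrength': 3,
            'Manufacturer': 'Siemens',
            'ManufacturersModelName': 'Prisma'}
print(general_acquisition_info(metadata))
# MR data were acquired using a 3-Tesla Siemens Prisma MRI scanner.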
bids-standard/pybids | bids/reports/parsing.py | parse_niftis | def parse_niftis(layout, niftis, subj, config, **kwargs):
"""
Loop through niftis in a BIDSLayout and generate the appropriate description
type for each scan. Compile all of the descriptions into a list.
Parameters
----------
layout : :obj:`bids.layout.BIDSLayout`
Layout object for a BIDS dataset.
niftis : :obj:`list` or :obj:`grabbit.core.File`
List of nifti files in layout corresponding to subject/session combo.
subj : :obj:`str`
Subject ID.
config : :obj:`dict`
Configuration info for methods generation.
"""
kwargs = {k: v for k, v in kwargs.items() if v is not None}
description_list = []
skip_task = {} # Only report each task once
for nifti_struct in niftis:
nii_file = nifti_struct.path
metadata = layout.get_metadata(nii_file)
if not metadata:
LOGGER.warning('No json file found for %s', nii_file)
else:
import nibabel as nib
img = nib.load(nii_file)
# Assume all data were acquired the same way.
if not description_list:
description_list.append(general_acquisition_info(metadata))
if nifti_struct.entities['datatype'] == 'func':
if not skip_task.get(nifti_struct.entities['task'], False):
echos = layout.get_echoes(subject=subj, extensions='nii.gz',
task=nifti_struct.entities['task'],
**kwargs)
n_echos = len(echos)
if n_echos > 0:
metadata['EchoTime'] = []
for echo in sorted(echos):
echo_struct = layout.get(subject=subj, echo=echo,
extensions='nii.gz',
task=nifti_struct.entities['task'],
**kwargs)[0]
echo_file = echo_struct.path
echo_meta = layout.get_metadata(echo_file)
metadata['EchoTime'].append(echo_meta['EchoTime'])
n_runs = len(layout.get_runs(subject=subj,
task=nifti_struct.entities['task'],
**kwargs))
description_list.append(func_info(nifti_struct.entities['task'],
n_runs, metadata, img,
config))
skip_task[nifti_struct.entities['task']] = True
elif nifti_struct.entities['datatype'] == 'anat':
suffix = nifti_struct.entities['suffix']
if suffix.endswith('w'):
suffix = suffix[:-1] + '-weighted'
description_list.append(anat_info(suffix, metadata, img,
config))
elif nifti_struct.entities['datatype'] == 'dwi':
bval_file = nii_file.replace('.nii.gz', '.bval')
description_list.append(dwi_info(bval_file, metadata, img,
config))
elif nifti_struct.entities['datatype'] == 'fmap':
description_list.append(fmap_info(metadata, img, config,
layout))
return description_list | python | def parse_niftis(layout, niftis, subj, config, **kwargs):
"""
Loop through niftis in a BIDSLayout and generate the appropriate description
type for each scan. Compile all of the descriptions into a list.
Parameters
----------
layout : :obj:`bids.layout.BIDSLayout`
Layout object for a BIDS dataset.
niftis : :obj:`list` or :obj:`grabbit.core.File`
List of nifti files in layout corresponding to subject/session combo.
subj : :obj:`str`
Subject ID.
config : :obj:`dict`
Configuration info for methods generation.
"""
kwargs = {k: v for k, v in kwargs.items() if v is not None}
description_list = []
skip_task = {} # Only report each task once
for nifti_struct in niftis:
nii_file = nifti_struct.path
metadata = layout.get_metadata(nii_file)
if not metadata:
LOGGER.warning('No json file found for %s', nii_file)
else:
import nibabel as nib
img = nib.load(nii_file)
# Assume all data were acquired the same way.
if not description_list:
description_list.append(general_acquisition_info(metadata))
if nifti_struct.entities['datatype'] == 'func':
if not skip_task.get(nifti_struct.entities['task'], False):
echos = layout.get_echoes(subject=subj, extensions='nii.gz',
task=nifti_struct.entities['task'],
**kwargs)
n_echos = len(echos)
if n_echos > 0:
metadata['EchoTime'] = []
for echo in sorted(echos):
echo_struct = layout.get(subject=subj, echo=echo,
extensions='nii.gz',
task=nifti_struct.entities['task'],
**kwargs)[0]
echo_file = echo_struct.path
echo_meta = layout.get_metadata(echo_file)
metadata['EchoTime'].append(echo_meta['EchoTime'])
n_runs = len(layout.get_runs(subject=subj,
task=nifti_struct.entities['task'],
**kwargs))
description_list.append(func_info(nifti_struct.entities['task'],
n_runs, metadata, img,
config))
skip_task[nifti_struct.entities['task']] = True
elif nifti_struct.entities['datatype'] == 'anat':
suffix = nifti_struct.entities['suffix']
if suffix.endswith('w'):
suffix = suffix[:-1] + '-weighted'
description_list.append(anat_info(suffix, metadata, img,
config))
elif nifti_struct.entities['datatype'] == 'dwi':
bval_file = nii_file.replace('.nii.gz', '.bval')
description_list.append(dwi_info(bval_file, metadata, img,
config))
elif nifti_struct.entities['datatype'] == 'fmap':
description_list.append(fmap_info(metadata, img, config,
layout))
return description_list | [
"def",
"parse_niftis",
"(",
"layout",
",",
"niftis",
",",
"subj",
",",
"config",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
"if",
"v",
"is",
"not",
"None",
"}",
"description_list",
"=",
"[",
"]",
"skip_task",
"=",
"{",
"}",
"# Only report each task once",
"for",
"nifti_struct",
"in",
"niftis",
":",
"nii_file",
"=",
"nifti_struct",
".",
"path",
"metadata",
"=",
"layout",
".",
"get_metadata",
"(",
"nii_file",
")",
"if",
"not",
"metadata",
":",
"LOGGER",
".",
"warning",
"(",
"'No json file found for %s'",
",",
"nii_file",
")",
"else",
":",
"import",
"nibabel",
"as",
"nib",
"img",
"=",
"nib",
".",
"load",
"(",
"nii_file",
")",
"# Assume all data were acquired the same way.",
"if",
"not",
"description_list",
":",
"description_list",
".",
"append",
"(",
"general_acquisition_info",
"(",
"metadata",
")",
")",
"if",
"nifti_struct",
".",
"entities",
"[",
"'datatype'",
"]",
"==",
"'func'",
":",
"if",
"not",
"skip_task",
".",
"get",
"(",
"nifti_struct",
".",
"entities",
"[",
"'task'",
"]",
",",
"False",
")",
":",
"echos",
"=",
"layout",
".",
"get_echoes",
"(",
"subject",
"=",
"subj",
",",
"extensions",
"=",
"'nii.gz'",
",",
"task",
"=",
"nifti_struct",
".",
"entities",
"[",
"'task'",
"]",
",",
"*",
"*",
"kwargs",
")",
"n_echos",
"=",
"len",
"(",
"echos",
")",
"if",
"n_echos",
">",
"0",
":",
"metadata",
"[",
"'EchoTime'",
"]",
"=",
"[",
"]",
"for",
"echo",
"in",
"sorted",
"(",
"echos",
")",
":",
"echo_struct",
"=",
"layout",
".",
"get",
"(",
"subject",
"=",
"subj",
",",
"echo",
"=",
"echo",
",",
"extensions",
"=",
"'nii.gz'",
",",
"task",
"=",
"nifti_struct",
".",
"entities",
"[",
"'task'",
"]",
",",
"*",
"*",
"kwargs",
")",
"[",
"0",
"]",
"echo_file",
"=",
"echo_struct",
".",
"path",
"echo_meta",
"=",
"layout",
".",
"get_metadata",
"(",
"echo_file",
")",
"metadata",
"[",
"'EchoTime'",
"]",
".",
"append",
"(",
"echo_meta",
"[",
"'EchoTime'",
"]",
")",
"n_runs",
"=",
"len",
"(",
"layout",
".",
"get_runs",
"(",
"subject",
"=",
"subj",
",",
"task",
"=",
"nifti_struct",
".",
"entities",
"[",
"'task'",
"]",
",",
"*",
"*",
"kwargs",
")",
")",
"description_list",
".",
"append",
"(",
"func_info",
"(",
"nifti_struct",
".",
"entities",
"[",
"'task'",
"]",
",",
"n_runs",
",",
"metadata",
",",
"img",
",",
"config",
")",
")",
"skip_task",
"[",
"nifti_struct",
".",
"entities",
"[",
"'task'",
"]",
"]",
"=",
"True",
"elif",
"nifti_struct",
".",
"entities",
"[",
"'datatype'",
"]",
"==",
"'anat'",
":",
"suffix",
"=",
"nifti_struct",
".",
"entities",
"[",
"'suffix'",
"]",
"if",
"suffix",
".",
"endswith",
"(",
"'w'",
")",
":",
"suffix",
"=",
"suffix",
"[",
":",
"-",
"1",
"]",
"+",
"'-weighted'",
"description_list",
".",
"append",
"(",
"anat_info",
"(",
"suffix",
",",
"metadata",
",",
"img",
",",
"config",
")",
")",
"elif",
"nifti_struct",
".",
"entities",
"[",
"'datatype'",
"]",
"==",
"'dwi'",
":",
"bval_file",
"=",
"nii_file",
".",
"replace",
"(",
"'.nii.gz'",
",",
"'.bval'",
")",
"description_list",
".",
"append",
"(",
"dwi_info",
"(",
"bval_file",
",",
"metadata",
",",
"img",
",",
"config",
")",
")",
"elif",
"nifti_struct",
".",
"entities",
"[",
"'datatype'",
"]",
"==",
"'fmap'",
":",
"description_list",
".",
"append",
"(",
"fmap_info",
"(",
"metadata",
",",
"img",
",",
"config",
",",
"layout",
")",
")",
"return",
"description_list"
] | Loop through niftis in a BIDSLayout and generate the appropriate description
type for each scan. Compile all of the descriptions into a list.
Parameters
----------
layout : :obj:`bids.layout.BIDSLayout`
Layout object for a BIDS dataset.
niftis : :obj:`list` or :obj:`grabbit.core.File`
List of nifti files in layout corresponding to subject/session combo.
subj : :obj:`str`
Subject ID.
config : :obj:`dict`
Configuration info for methods generation. | [
"Loop",
"through",
"niftis",
"in",
"a",
"BIDSLayout",
"and",
"generate",
"the",
"appropriate",
"description",
"type",
"for",
"each",
"scan",
".",
"Compile",
"all",
"of",
"the",
"descriptions",
"into",
"a",
"list",
"."
] | 30d924ce770622bda0e390d613a8da42a2a20c32 | https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/reports/parsing.py#L407-L479 | train |
Microsoft/ApplicationInsights-Python | applicationinsights/TelemetryClient.py | TelemetryClient.track_exception | def track_exception(self, type=None, value=None, tb=None, properties=None, measurements=None):
""" Send information about a single exception that occurred in the application.
Args:
type (Type). the type of the exception that was thrown.\n
value (:class:`Exception`). the exception that the client wants to send.\n
tb (:class:`Traceback`). the traceback information as returned by :func:`sys.exc_info`.\n
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n
measurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None)
"""
if not type or not value or not tb:
type, value, tb = sys.exc_info()
if not type or not value or not tb:
try:
raise Exception(NULL_CONSTANT_STRING)
except:
type, value, tb = sys.exc_info()
details = channel.contracts.ExceptionDetails()
details.id = 1
details.outer_id = 0
details.type_name = type.__name__
details.message = str(value)
details.has_full_stack = True
counter = 0
for tb_frame_file, tb_frame_line, tb_frame_function, tb_frame_text in traceback.extract_tb(tb):
frame = channel.contracts.StackFrame()
frame.assembly = 'Unknown'
frame.file_name = tb_frame_file
frame.level = counter
frame.line = tb_frame_line
frame.method = tb_frame_function
details.parsed_stack.append(frame)
counter += 1
details.parsed_stack.reverse()
data = channel.contracts.ExceptionData()
data.handled_at = 'UserCode'
data.exceptions.append(details)
if properties:
data.properties = properties
if measurements:
data.measurements = measurements
self.track(data, self._context) | python | def track_exception(self, type=None, value=None, tb=None, properties=None, measurements=None):
""" Send information about a single exception that occurred in the application.
Args:
type (Type). the type of the exception that was thrown.\n
value (:class:`Exception`). the exception that the client wants to send.\n
tb (:class:`Traceback`). the traceback information as returned by :func:`sys.exc_info`.\n
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n
measurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None)
"""
if not type or not value or not tb:
type, value, tb = sys.exc_info()
if not type or not value or not tb:
try:
raise Exception(NULL_CONSTANT_STRING)
except:
type, value, tb = sys.exc_info()
details = channel.contracts.ExceptionDetails()
details.id = 1
details.outer_id = 0
details.type_name = type.__name__
details.message = str(value)
details.has_full_stack = True
counter = 0
for tb_frame_file, tb_frame_line, tb_frame_function, tb_frame_text in traceback.extract_tb(tb):
frame = channel.contracts.StackFrame()
frame.assembly = 'Unknown'
frame.file_name = tb_frame_file
frame.level = counter
frame.line = tb_frame_line
frame.method = tb_frame_function
details.parsed_stack.append(frame)
counter += 1
details.parsed_stack.reverse()
data = channel.contracts.ExceptionData()
data.handled_at = 'UserCode'
data.exceptions.append(details)
if properties:
data.properties = properties
if measurements:
data.measurements = measurements
self.track(data, self._context) | [
"def",
"track_exception",
"(",
"self",
",",
"type",
"=",
"None",
",",
"value",
"=",
"None",
",",
"tb",
"=",
"None",
",",
"properties",
"=",
"None",
",",
"measurements",
"=",
"None",
")",
":",
"if",
"not",
"type",
"or",
"not",
"value",
"or",
"not",
"tb",
":",
"type",
",",
"value",
",",
"tb",
"=",
"sys",
".",
"exc_info",
"(",
")",
"if",
"not",
"type",
"or",
"not",
"value",
"or",
"not",
"tb",
":",
"try",
":",
"raise",
"Exception",
"(",
"NULL_CONSTANT_STRING",
")",
"except",
":",
"type",
",",
"value",
",",
"tb",
"=",
"sys",
".",
"exc_info",
"(",
")",
"details",
"=",
"channel",
".",
"contracts",
".",
"ExceptionDetails",
"(",
")",
"details",
".",
"id",
"=",
"1",
"details",
".",
"outer_id",
"=",
"0",
"details",
".",
"type_name",
"=",
"type",
".",
"__name__",
"details",
".",
"message",
"=",
"str",
"(",
"value",
")",
"details",
".",
"has_full_stack",
"=",
"True",
"counter",
"=",
"0",
"for",
"tb_frame_file",
",",
"tb_frame_line",
",",
"tb_frame_function",
",",
"tb_frame_text",
"in",
"traceback",
".",
"extract_tb",
"(",
"tb",
")",
":",
"frame",
"=",
"channel",
".",
"contracts",
".",
"StackFrame",
"(",
")",
"frame",
".",
"assembly",
"=",
"'Unknown'",
"frame",
".",
"file_name",
"=",
"tb_frame_file",
"frame",
".",
"level",
"=",
"counter",
"frame",
".",
"line",
"=",
"tb_frame_line",
"frame",
".",
"method",
"=",
"tb_frame_function",
"details",
".",
"parsed_stack",
".",
"append",
"(",
"frame",
")",
"counter",
"+=",
"1",
"details",
".",
"parsed_stack",
".",
"reverse",
"(",
")",
"data",
"=",
"channel",
".",
"contracts",
".",
"ExceptionData",
"(",
")",
"data",
".",
"handled_at",
"=",
"'UserCode'",
"data",
".",
"exceptions",
".",
"append",
"(",
"details",
")",
"if",
"properties",
":",
"data",
".",
"properties",
"=",
"properties",
"if",
"measurements",
":",
"data",
".",
"measurements",
"=",
"measurements",
"self",
".",
"track",
"(",
"data",
",",
"self",
".",
"_context",
")"
] | Send information about a single exception that occurred in the application.
Args:
type (Type). the type of the exception that was thrown.\n
value (:class:`Exception`). the exception that the client wants to send.\n
tb (:class:`Traceback`). the traceback information as returned by :func:`sys.exc_info`.\n
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n
measurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None) | [
"Send",
"information",
"about",
"a",
"single",
"exception",
"that",
"occurred",
"in",
"the",
"application",
"."
] | 8452ab7126f9bb6964637d4aa1258c2af17563d6 | https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/TelemetryClient.py#L82-L126 | train |
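A hedged usage sketch for track_exception above; the instrumentation key is a placeholder, and flush() is assumed to push the queued telemetry to the service:
from applicationinsights import TelemetryClient

tc = TelemetryClient('<your instrumentation key>')
try:
    raise ValueError('something went wrong')
except ValueError:
    tc.track_exception()   # with no arguments, the method reads the active exception via sys.exc_info()
tc.flush()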
Microsoft/ApplicationInsights-Python | applicationinsights/TelemetryClient.py | TelemetryClient.track_event | def track_event(self, name, properties=None, measurements=None):
""" Send information about a single event that has occurred in the context of the application.
Args:
name (str). the data to associate to this event.\n
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n
measurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None)
"""
data = channel.contracts.EventData()
data.name = name or NULL_CONSTANT_STRING
if properties:
data.properties = properties
if measurements:
data.measurements = measurements
self.track(data, self._context) | python | def track_event(self, name, properties=None, measurements=None):
""" Send information about a single event that has occurred in the context of the application.
Args:
name (str). the data to associate to this event.\n
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n
measurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None)
"""
data = channel.contracts.EventData()
data.name = name or NULL_CONSTANT_STRING
if properties:
data.properties = properties
if measurements:
data.measurements = measurements
self.track(data, self._context) | [
"def",
"track_event",
"(",
"self",
",",
"name",
",",
"properties",
"=",
"None",
",",
"measurements",
"=",
"None",
")",
":",
"data",
"=",
"channel",
".",
"contracts",
".",
"EventData",
"(",
")",
"data",
".",
"name",
"=",
"name",
"or",
"NULL_CONSTANT_STRING",
"if",
"properties",
":",
"data",
".",
"properties",
"=",
"properties",
"if",
"measurements",
":",
"data",
".",
"measurements",
"=",
"measurements",
"self",
".",
"track",
"(",
"data",
",",
"self",
".",
"_context",
")"
] | Send information about a single event that has occurred in the context of the application.
Args:
name (str). the data to associate to this event.\n
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n
measurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None) | [
"Send",
"information",
"about",
"a",
"single",
"event",
"that",
"has",
"occurred",
"in",
"the",
"context",
"of",
"the",
"application",
"."
] | 8452ab7126f9bb6964637d4aa1258c2af17563d6 | https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/TelemetryClient.py#L128-L143 | train |
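A one-line illustration of track_event above, reusing the hypothetical tc client from the track_exception sketch:
tc.track_event('video_played', properties={'plan': 'free'}, measurements={'duration_sec': 42.0})
tc.flush()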
Microsoft/ApplicationInsights-Python | applicationinsights/TelemetryClient.py | TelemetryClient.track_metric | def track_metric(self, name, value, type=None, count=None, min=None, max=None, std_dev=None, properties=None):
"""Send information about a single metric data point that was captured for the application.
Args:
name (str). the name of the metric that was captured.\n
value (float). the value of the metric that was captured.\n
type (:class:`channel.contracts.DataPointType`). the type of the metric. (defaults to: :func:`channel.contracts.DataPointType.aggregation`)\n
count (int). the number of metrics that were aggregated into this data point. (defaults to: None)\n
min (float). the minimum of all metrics collected that were aggregated into this data point. (defaults to: None)\n
max (float). the maximum of all metrics collected that were aggregated into this data point. (defaults to: None)\n
std_dev (float). the standard deviation of all metrics collected that were aggregated into this data point. (defaults to: None)\n
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)
"""
dataPoint = channel.contracts.DataPoint()
dataPoint.name = name or NULL_CONSTANT_STRING
dataPoint.value = value or 0
dataPoint.kind = type or channel.contracts.DataPointType.aggregation
dataPoint.count = count
dataPoint.min = min
dataPoint.max = max
dataPoint.std_dev = std_dev
data = channel.contracts.MetricData()
data.metrics.append(dataPoint)
if properties:
data.properties = properties
self.track(data, self._context) | python | def track_metric(self, name, value, type=None, count=None, min=None, max=None, std_dev=None, properties=None):
"""Send information about a single metric data point that was captured for the application.
Args:
name (str). the name of the metric that was captured.\n
value (float). the value of the metric that was captured.\n
type (:class:`channel.contracts.DataPointType`). the type of the metric. (defaults to: :func:`channel.contracts.DataPointType.aggregation`)\n
count (int). the number of metrics that were aggregated into this data point. (defaults to: None)\n
min (float). the minimum of all metrics collected that were aggregated into this data point. (defaults to: None)\n
max (float). the maximum of all metrics collected that were aggregated into this data point. (defaults to: None)\n
std_dev (float). the standard deviation of all metrics collected that were aggregated into this data point. (defaults to: None)\n
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)
"""
dataPoint = channel.contracts.DataPoint()
dataPoint.name = name or NULL_CONSTANT_STRING
dataPoint.value = value or 0
dataPoint.kind = type or channel.contracts.DataPointType.aggregation
dataPoint.count = count
dataPoint.min = min
dataPoint.max = max
dataPoint.std_dev = std_dev
data = channel.contracts.MetricData()
data.metrics.append(dataPoint)
if properties:
data.properties = properties
self.track(data, self._context) | [
"def",
"track_metric",
"(",
"self",
",",
"name",
",",
"value",
",",
"type",
"=",
"None",
",",
"count",
"=",
"None",
",",
"min",
"=",
"None",
",",
"max",
"=",
"None",
",",
"std_dev",
"=",
"None",
",",
"properties",
"=",
"None",
")",
":",
"dataPoint",
"=",
"channel",
".",
"contracts",
".",
"DataPoint",
"(",
")",
"dataPoint",
".",
"name",
"=",
"name",
"or",
"NULL_CONSTANT_STRING",
"dataPoint",
".",
"value",
"=",
"value",
"or",
"0",
"dataPoint",
".",
"kind",
"=",
"type",
"or",
"channel",
".",
"contracts",
".",
"DataPointType",
".",
"aggregation",
"dataPoint",
".",
"count",
"=",
"count",
"dataPoint",
".",
"min",
"=",
"min",
"dataPoint",
".",
"max",
"=",
"max",
"dataPoint",
".",
"std_dev",
"=",
"std_dev",
"data",
"=",
"channel",
".",
"contracts",
".",
"MetricData",
"(",
")",
"data",
".",
"metrics",
".",
"append",
"(",
"dataPoint",
")",
"if",
"properties",
":",
"data",
".",
"properties",
"=",
"properties",
"self",
".",
"track",
"(",
"data",
",",
"self",
".",
"_context",
")"
] | Send information about a single metric data point that was captured for the application.
Args:
name (str). the name of the metric that was captured.\n
value (float). the value of the metric that was captured.\n
type (:class:`channel.contracts.DataPointType`). the type of the metric. (defaults to: :func:`channel.contracts.DataPointType.aggregation`)\n
count (int). the number of metrics that were aggregated into this data point. (defaults to: None)\n
min (float). the minimum of all metrics collected that were aggregated into this data point. (defaults to: None)\n
max (float). the maximum of all metrics collected that were aggregated into this data point. (defaults to: None)\n
std_dev (float). the standard deviation of all metrics collected that were aggregated into this data point. (defaults to: None)\n
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None) | [
"Send",
"information",
"about",
"a",
"single",
"metric",
"data",
"point",
"that",
"was",
"captured",
"for",
"the",
"application",
"."
] | 8452ab7126f9bb6964637d4aa1258c2af17563d6 | https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/TelemetryClient.py#L145-L172 | train |
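A minimal illustration of track_metric above, again reusing the hypothetical tc client from the track_exception sketch:
tc.track_metric('queue_length', 17, properties={'host': 'worker-1'})
tc.flush()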
Microsoft/ApplicationInsights-Python | applicationinsights/TelemetryClient.py | TelemetryClient.track_trace | def track_trace(self, name, properties=None, severity=None):
"""Sends a single trace statement.
Args:
name (str). the trace statement.\n
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n
severity (str). the severity level of this trace, one of DEBUG, INFO, WARNING, ERROR, CRITICAL
"""
data = channel.contracts.MessageData()
data.message = name or NULL_CONSTANT_STRING
if properties:
data.properties = properties
if severity is not None:
data.severity_level = channel.contracts.MessageData.PYTHON_LOGGING_LEVELS.get(severity)
self.track(data, self._context) | python | def track_trace(self, name, properties=None, severity=None):
"""Sends a single trace statement.
Args:
name (str). the trace statement.\n
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n
severity (str). the severity level of this trace, one of DEBUG, INFO, WARNING, ERROR, CRITICAL
"""
data = channel.contracts.MessageData()
data.message = name or NULL_CONSTANT_STRING
if properties:
data.properties = properties
if severity is not None:
data.severity_level = channel.contracts.MessageData.PYTHON_LOGGING_LEVELS.get(severity)
self.track(data, self._context) | [
"def",
"track_trace",
"(",
"self",
",",
"name",
",",
"properties",
"=",
"None",
",",
"severity",
"=",
"None",
")",
":",
"data",
"=",
"channel",
".",
"contracts",
".",
"MessageData",
"(",
")",
"data",
".",
"message",
"=",
"name",
"or",
"NULL_CONSTANT_STRING",
"if",
"properties",
":",
"data",
".",
"properties",
"=",
"properties",
"if",
"severity",
"is",
"not",
"None",
":",
"data",
".",
"severity_level",
"=",
"channel",
".",
"contracts",
".",
"MessageData",
".",
"PYTHON_LOGGING_LEVELS",
".",
"get",
"(",
"severity",
")",
"self",
".",
"track",
"(",
"data",
",",
"self",
".",
"_context",
")"
] | Sends a single trace statement.
Args:
name (str). the trace statement.\n
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n
severity (str). the severity level of this trace, one of DEBUG, INFO, WARNING, ERROR, CRITICAL | [
"Sends",
"a",
"single",
"trace",
"statement",
"."
] | 8452ab7126f9bb6964637d4aa1258c2af17563d6 | https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/TelemetryClient.py#L175-L190 | train |
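A minimal illustration of track_trace above with an explicit severity level, reusing the hypothetical tc client from the track_exception sketch:
tc.track_trace('Cache miss for user profile', properties={'user': '42'}, severity='WARNING')
tc.flush()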
Microsoft/ApplicationInsights-Python | applicationinsights/TelemetryClient.py | TelemetryClient.track_request | def track_request(self, name, url, success, start_time=None, duration=None, response_code=None, http_method=None, properties=None, measurements=None, request_id=None):
"""Sends a single request that was captured for the application.
Args:
name (str). the name for this request. All requests with the same name will be grouped together.\n
url (str). the actual URL for this request (to show in individual request instances).\n
success (bool). true if the request ended in success, false otherwise.\n
start_time (str). the start time of the request. The value should look the same as the one returned by :func:`datetime.isoformat()` (defaults to: None)\n
duration (int). the number of milliseconds that this request lasted. (defaults to: None)\n
response_code (str). the response code that this request returned. (defaults to: None)\n
http_method (str). the HTTP method that triggered this request. (defaults to: None)\n
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n
measurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None)\n
request_id (str). the id for this request. If None, a new uuid will be generated. (defaults to: None)
"""
data = channel.contracts.RequestData()
data.id = request_id or str(uuid.uuid4())
data.name = name
data.url = url
data.success = success
data.start_time = start_time or datetime.datetime.utcnow().isoformat() + 'Z'
data.duration = self.__ms_to_duration(duration)
data.response_code = str(response_code) or '200'
data.http_method = http_method or 'GET'
if properties:
data.properties = properties
if measurements:
data.measurements = measurements
self.track(data, self._context) | python | def track_request(self, name, url, success, start_time=None, duration=None, response_code=None, http_method=None, properties=None, measurements=None, request_id=None):
"""Sends a single request that was captured for the application.
Args:
name (str). the name for this request. All requests with the same name will be grouped together.\n
url (str). the actual URL for this request (to show in individual request instances).\n
success (bool). true if the request ended in success, false otherwise.\n
start_time (str). the start time of the request. The value should look the same as the one returned by :func:`datetime.isoformat()` (defaults to: None)\n
duration (int). the number of milliseconds that this request lasted. (defaults to: None)\n
response_code (str). the response code that this request returned. (defaults to: None)\n
http_method (str). the HTTP method that triggered this request. (defaults to: None)\n
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n
measurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None)\n
request_id (str). the id for this request. If None, a new uuid will be generated. (defaults to: None)
"""
data = channel.contracts.RequestData()
data.id = request_id or str(uuid.uuid4())
data.name = name
data.url = url
data.success = success
data.start_time = start_time or datetime.datetime.utcnow().isoformat() + 'Z'
data.duration = self.__ms_to_duration(duration)
data.response_code = str(response_code) or '200'
data.http_method = http_method or 'GET'
if properties:
data.properties = properties
if measurements:
data.measurements = measurements
self.track(data, self._context) | [
"def",
"track_request",
"(",
"self",
",",
"name",
",",
"url",
",",
"success",
",",
"start_time",
"=",
"None",
",",
"duration",
"=",
"None",
",",
"response_code",
"=",
"None",
",",
"http_method",
"=",
"None",
",",
"properties",
"=",
"None",
",",
"measurements",
"=",
"None",
",",
"request_id",
"=",
"None",
")",
":",
"data",
"=",
"channel",
".",
"contracts",
".",
"RequestData",
"(",
")",
"data",
".",
"id",
"=",
"request_id",
"or",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"data",
".",
"name",
"=",
"name",
"data",
".",
"url",
"=",
"url",
"data",
".",
"success",
"=",
"success",
"data",
".",
"start_time",
"=",
"start_time",
"or",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
".",
"isoformat",
"(",
")",
"+",
"'Z'",
"data",
".",
"duration",
"=",
"self",
".",
"__ms_to_duration",
"(",
"duration",
")",
"data",
".",
"response_code",
"=",
"str",
"(",
"response_code",
")",
"or",
"'200'",
"data",
".",
"http_method",
"=",
"http_method",
"or",
"'GET'",
"if",
"properties",
":",
"data",
".",
"properties",
"=",
"properties",
"if",
"measurements",
":",
"data",
".",
"measurements",
"=",
"measurements",
"self",
".",
"track",
"(",
"data",
",",
"self",
".",
"_context",
")"
] | Sends a single request that was captured for the application.
Args:
name (str). the name for this request. All requests with the same name will be grouped together.\n
url (str). the actual URL for this request (to show in individual request instances).\n
success (bool). true if the request ended in success, false otherwise.\n
start_time (str). the start time of the request. The value should look the same as the one returned by :func:`datetime.isoformat()` (defaults to: None)\n
duration (int). the number of milliseconds that this request lasted. (defaults to: None)\n
response_code (str). the response code that this request returned. (defaults to: None)\n
http_method (str). the HTTP method that triggered this request. (defaults to: None)\n
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n
measurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None)\n
request_id (str). the id for this request. If None, a new uuid will be generated. (defaults to: None) | [
"Sends",
"a",
"single",
"request",
"that",
"was",
"captured",
"for",
"the",
"application",
"."
] | 8452ab7126f9bb6964637d4aa1258c2af17563d6 | https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/TelemetryClient.py#L193-L222 | train |
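A minimal illustration of track_request above, reusing the hypothetical tc client from the track_exception sketch; the URL, duration, and status code are made up:
tc.track_request('GET /api/items', 'http://example.com/api/items', True,
                 duration=137, response_code=200, http_method='GET')
tc.flush()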
Microsoft/ApplicationInsights-Python | applicationinsights/TelemetryClient.py | TelemetryClient.track_dependency | def track_dependency(self, name, data, type=None, target=None, duration=None, success=None, result_code=None, properties=None, measurements=None, dependency_id=None):
"""Sends a single dependency telemetry that was captured for the application.
Args:
name (str). the name of the command initiated with this dependency call. Low cardinality value. Examples are stored procedure name and URL path template.\n
data (str). the command initiated by this dependency call. Examples are SQL statement and HTTP URL with all query parameters.\n
type (str). the dependency type name. Low cardinality value for logical grouping of dependencies and interpretation of other fields like commandName and resultCode. Examples are SQL, Azure table, and HTTP. (default to: None)\n
target (str). the target site of a dependency call. Examples are server name, host address. (default to: None)\n
duration (int). the number of milliseconds that this dependency call lasted. (defaults to: None)\n
success (bool). true if the dependency call ended in success, false otherwise. (defaults to: None)\n
result_code (str). the result code of a dependency call. Examples are SQL error code and HTTP status code. (defaults to: None)\n
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n
measurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None)\n
id (str). the id for this dependency call. If None, a new uuid will be generated. (defaults to: None)
"""
dependency_data = channel.contracts.RemoteDependencyData()
dependency_data.id = dependency_id or str(uuid.uuid4())
dependency_data.name = name
dependency_data.data = data
dependency_data.type = type
dependency_data.target = target
dependency_data.duration = self.__ms_to_duration(duration)
dependency_data.success = success
dependency_data.result_code = str(result_code) or '200'
if properties:
dependency_data.properties = properties
if measurements:
dependency_data.measurements = measurements
self.track(dependency_data, self._context) | python | def track_dependency(self, name, data, type=None, target=None, duration=None, success=None, result_code=None, properties=None, measurements=None, dependency_id=None):
"""Sends a single dependency telemetry that was captured for the application.
Args:
name (str). the name of the command initiated with this dependency call. Low cardinality value. Examples are stored procedure name and URL path template.\n
data (str). the command initiated by this dependency call. Examples are SQL statement and HTTP URL with all query parameters.\n
type (str). the dependency type name. Low cardinality value for logical grouping of dependencies and interpretation of other fields like commandName and resultCode. Examples are SQL, Azure table, and HTTP. (default to: None)\n
target (str). the target site of a dependency call. Examples are server name, host address. (default to: None)\n
duration (int). the number of milliseconds that this dependency call lasted. (defaults to: None)\n
success (bool). true if the dependency call ended in success, false otherwise. (defaults to: None)\n
result_code (str). the result code of a dependency call. Examples are SQL error code and HTTP status code. (defaults to: None)\n
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n
measurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None)\n
id (str). the id for this dependency call. If None, a new uuid will be generated. (defaults to: None)
"""
dependency_data = channel.contracts.RemoteDependencyData()
dependency_data.id = dependency_id or str(uuid.uuid4())
dependency_data.name = name
dependency_data.data = data
dependency_data.type = type
dependency_data.target = target
dependency_data.duration = self.__ms_to_duration(duration)
dependency_data.success = success
dependency_data.result_code = str(result_code) or '200'
if properties:
dependency_data.properties = properties
if measurements:
dependency_data.measurements = measurements
self.track(dependency_data, self._context) | [
"def",
"track_dependency",
"(",
"self",
",",
"name",
",",
"data",
",",
"type",
"=",
"None",
",",
"target",
"=",
"None",
",",
"duration",
"=",
"None",
",",
"success",
"=",
"None",
",",
"result_code",
"=",
"None",
",",
"properties",
"=",
"None",
",",
"measurements",
"=",
"None",
",",
"dependency_id",
"=",
"None",
")",
":",
"dependency_data",
"=",
"channel",
".",
"contracts",
".",
"RemoteDependencyData",
"(",
")",
"dependency_data",
".",
"id",
"=",
"dependency_id",
"or",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"dependency_data",
".",
"name",
"=",
"name",
"dependency_data",
".",
"data",
"=",
"data",
"dependency_data",
".",
"type",
"=",
"type",
"dependency_data",
".",
"target",
"=",
"target",
"dependency_data",
".",
"duration",
"=",
"self",
".",
"__ms_to_duration",
"(",
"duration",
")",
"dependency_data",
".",
"success",
"=",
"success",
"dependency_data",
".",
"result_code",
"=",
"str",
"(",
"result_code",
")",
"or",
"'200'",
"if",
"properties",
":",
"dependency_data",
".",
"properties",
"=",
"properties",
"if",
"measurements",
":",
"dependency_data",
".",
"measurements",
"=",
"measurements",
"self",
".",
"track",
"(",
"dependency_data",
",",
"self",
".",
"_context",
")"
] | Sends a single dependency telemetry that was captured for the application.
Args:
name (str). the name of the command initiated with this dependency call. Low cardinality value. Examples are stored procedure name and URL path template.\n
data (str). the command initiated by this dependency call. Examples are SQL statement and HTTP URL with all query parameters.\n
type (str). the dependency type name. Low cardinality value for logical grouping of dependencies and interpretation of other fields like commandName and resultCode. Examples are SQL, Azure table, and HTTP. (default to: None)\n
target (str). the target site of a dependency call. Examples are server name, host address. (default to: None)\n
duration (int). the number of milliseconds that this dependency call lasted. (defaults to: None)\n
success (bool). true if the dependency call ended in success, false otherwise. (defaults to: None)\n
result_code (str). the result code of a dependency call. Examples are SQL error code and HTTP status code. (defaults to: None)\n
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n
measurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None)\n
id (str). the id for this dependency call. If None, a new uuid will be generated. (defaults to: None) | [
"Sends",
"a",
"single",
"dependency",
"telemetry",
"that",
"was",
"captured",
"for",
"the",
"application",
"."
] | 8452ab7126f9bb6964637d4aa1258c2af17563d6 | https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/TelemetryClient.py#L224-L253 | train |
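A matching sketch for track_dependency (again not part of the record): it logs one outgoing HTTP call with illustrative values; the placeholder key and target host are assumptions.

from applicationinsights import TelemetryClient

tc = TelemetryClient('00000000-0000-0000-0000-000000000000')  # placeholder key

# 'name' is the low-cardinality command, 'data' the full command that was issued
tc.track_dependency(
    name='GET /users/{id}',
    data='https://api.example.com/users/42',
    type='HTTP',
    target='api.example.com',
    duration=87,            # milliseconds
    success=True,
    result_code='200',
)
tc.flush()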
Microsoft/ApplicationInsights-Python | applicationinsights/django/common.py | dummy_client | def dummy_client(reason):
"""Creates a dummy channel so even if we're not logging telemetry, we can still send
along the real object to things that depend on it to exist"""
sender = applicationinsights.channel.NullSender()
queue = applicationinsights.channel.SynchronousQueue(sender)
channel = applicationinsights.channel.TelemetryChannel(None, queue)
return applicationinsights.TelemetryClient("00000000-0000-0000-0000-000000000000", channel) | python | def dummy_client(reason):
"""Creates a dummy channel so even if we're not logging telemetry, we can still send
along the real object to things that depend on it to exist"""
sender = applicationinsights.channel.NullSender()
queue = applicationinsights.channel.SynchronousQueue(sender)
channel = applicationinsights.channel.TelemetryChannel(None, queue)
return applicationinsights.TelemetryClient("00000000-0000-0000-0000-000000000000", channel) | [
"def",
"dummy_client",
"(",
"reason",
")",
":",
"sender",
"=",
"applicationinsights",
".",
"channel",
".",
"NullSender",
"(",
")",
"queue",
"=",
"applicationinsights",
".",
"channel",
".",
"SynchronousQueue",
"(",
"sender",
")",
"channel",
"=",
"applicationinsights",
".",
"channel",
".",
"TelemetryChannel",
"(",
"None",
",",
"queue",
")",
"return",
"applicationinsights",
".",
"TelemetryClient",
"(",
"\"00000000-0000-0000-0000-000000000000\"",
",",
"channel",
")"
] | Creates a dummy channel so even if we're not logging telemetry, we can still send
along the real object to things that depend on it to exist | [
"Creates",
"a",
"dummy",
"channel",
"so",
"even",
"if",
"we",
"re",
"not",
"logging",
"telemetry",
"we",
"can",
"still",
"send",
"along",
"the",
"real",
"object",
"to",
"things",
"that",
"depend",
"on",
"it",
"to",
"exist"
] | 8452ab7126f9bb6964637d4aa1258c2af17563d6 | https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/django/common.py#L75-L82 | train |
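The helper above simply wires a no-op sender into a real client; the sketch below reproduces that wiring directly from the channel classes named in the record, so it can be run without the Django-specific module. track_event is another TelemetryClient method that is not shown in this record.

from applicationinsights import TelemetryClient
from applicationinsights.channel import NullSender, SynchronousQueue, TelemetryChannel

# same wiring as dummy_client(): a working client whose sender silently drops every envelope
channel = TelemetryChannel(None, SynchronousQueue(NullSender()))
tc = TelemetryClient('00000000-0000-0000-0000-000000000000', channel)

tc.track_event('ignored')   # accepted by the client, discarded by NullSender (method assumed, see above)
tc.flush()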
Microsoft/ApplicationInsights-Python | applicationinsights/exceptions/enable.py | enable | def enable(instrumentation_key, *args, **kwargs):
"""Enables the automatic collection of unhandled exceptions. Captured exceptions will be sent to the Application
Insights service before being re-thrown. Multiple calls to this function with different instrumentation keys result
in multiple instances being submitted, one for each key.
.. code:: python
from applicationinsights.exceptions import enable
# set up exception capture
enable('<YOUR INSTRUMENTATION KEY GOES HERE>')
# raise an exception (this will be sent to the Application Insights service as an exception telemetry object)
raise Exception('Boom!')
Args:
instrumentation_key (str). the instrumentation key to use while sending telemetry to the service.
"""
if not instrumentation_key:
raise Exception('Instrumentation key was required but not provided')
global original_excepthook
global telemetry_channel
telemetry_channel = kwargs.get('telemetry_channel')
if not original_excepthook:
original_excepthook = sys.excepthook
sys.excepthook = intercept_excepthook
if instrumentation_key not in enabled_instrumentation_keys:
enabled_instrumentation_keys.append(instrumentation_key) | python | def enable(instrumentation_key, *args, **kwargs):
"""Enables the automatic collection of unhandled exceptions. Captured exceptions will be sent to the Application
Insights service before being re-thrown. Multiple calls to this function with different instrumentation keys result
in multiple instances being submitted, one for each key.
.. code:: python
from applicationinsights.exceptions import enable
# set up exception capture
enable('<YOUR INSTRUMENTATION KEY GOES HERE>')
# raise an exception (this will be sent to the Application Insights service as an exception telemetry object)
raise Exception('Boom!')
Args:
instrumentation_key (str). the instrumentation key to use while sending telemetry to the service.
"""
if not instrumentation_key:
raise Exception('Instrumentation key was required but not provided')
global original_excepthook
global telemetry_channel
telemetry_channel = kwargs.get('telemetry_channel')
if not original_excepthook:
original_excepthook = sys.excepthook
sys.excepthook = intercept_excepthook
if instrumentation_key not in enabled_instrumentation_keys:
enabled_instrumentation_keys.append(instrumentation_key) | [
"def",
"enable",
"(",
"instrumentation_key",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"instrumentation_key",
":",
"raise",
"Exception",
"(",
"'Instrumentation key was required but not provided'",
")",
"global",
"original_excepthook",
"global",
"telemetry_channel",
"telemetry_channel",
"=",
"kwargs",
".",
"get",
"(",
"'telemetry_channel'",
")",
"if",
"not",
"original_excepthook",
":",
"original_excepthook",
"=",
"sys",
".",
"excepthook",
"sys",
".",
"excepthook",
"=",
"intercept_excepthook",
"if",
"instrumentation_key",
"not",
"in",
"enabled_instrumentation_keys",
":",
"enabled_instrumentation_keys",
".",
"append",
"(",
"instrumentation_key",
")"
] | Enables the automatic collection of unhandled exceptions. Captured exceptions will be sent to the Application
Insights service before being re-thrown. Multiple calls to this function with different instrumentation keys result
in multiple instances being submitted, one for each key.
.. code:: python
from applicationinsights.exceptions import enable
# set up exception capture
enable('<YOUR INSTRUMENTATION KEY GOES HERE>')
# raise an exception (this will be sent to the Application Insights service as an exception telemetry object)
raise Exception('Boom!')
Args:
instrumentation_key (str). the instrumentation key to use while sending telemetry to the service. | [
"Enables",
"the",
"automatic",
"collection",
"of",
"unhandled",
"exceptions",
".",
"Captured",
"exceptions",
"will",
"be",
"sent",
"to",
"the",
"Application",
"Insights",
"service",
"before",
"being",
"re",
"-",
"thrown",
".",
"Multiple",
"calls",
"to",
"this",
"function",
"with",
"different",
"instrumentation",
"keys",
"result",
"in",
"multiple",
"instances",
"being",
"submitted",
"one",
"for",
"each",
"key",
"."
] | 8452ab7126f9bb6964637d4aa1258c2af17563d6 | https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/exceptions/enable.py#L8-L35 | train |
Microsoft/ApplicationInsights-Python | applicationinsights/flask/ext.py | AppInsights.init_app | def init_app(self, app):
"""
Initializes the extension for the provided Flask application.
Args:
app (flask.Flask). the Flask application for which to initialize the extension.
"""
self._key = app.config.get(CONF_KEY) or getenv(CONF_KEY)
if not self._key:
return
self._endpoint_uri = app.config.get(CONF_ENDPOINT_URI)
sender = AsynchronousSender(self._endpoint_uri)
queue = AsynchronousQueue(sender)
self._channel = TelemetryChannel(None, queue)
self._init_request_logging(app)
self._init_trace_logging(app)
self._init_exception_logging(app) | python | def init_app(self, app):
"""
Initializes the extension for the provided Flask application.
Args:
app (flask.Flask). the Flask application for which to initialize the extension.
"""
self._key = app.config.get(CONF_KEY) or getenv(CONF_KEY)
if not self._key:
return
self._endpoint_uri = app.config.get(CONF_ENDPOINT_URI)
sender = AsynchronousSender(self._endpoint_uri)
queue = AsynchronousQueue(sender)
self._channel = TelemetryChannel(None, queue)
self._init_request_logging(app)
self._init_trace_logging(app)
self._init_exception_logging(app) | [
"def",
"init_app",
"(",
"self",
",",
"app",
")",
":",
"self",
".",
"_key",
"=",
"app",
".",
"config",
".",
"get",
"(",
"CONF_KEY",
")",
"or",
"getenv",
"(",
"CONF_KEY",
")",
"if",
"not",
"self",
".",
"_key",
":",
"return",
"self",
".",
"_endpoint_uri",
"=",
"app",
".",
"config",
".",
"get",
"(",
"CONF_ENDPOINT_URI",
")",
"sender",
"=",
"AsynchronousSender",
"(",
"self",
".",
"_endpoint_uri",
")",
"queue",
"=",
"AsynchronousQueue",
"(",
"sender",
")",
"self",
".",
"_channel",
"=",
"TelemetryChannel",
"(",
"None",
",",
"queue",
")",
"self",
".",
"_init_request_logging",
"(",
"app",
")",
"self",
".",
"_init_trace_logging",
"(",
"app",
")",
"self",
".",
"_init_exception_logging",
"(",
"app",
")"
] | Initializes the extension for the provided Flask application.
Args:
app (flask.Flask). the Flask application for which to initialize the extension. | [
"Initializes",
"the",
"extension",
"for",
"the",
"provided",
"Flask",
"application",
"."
] | 8452ab7126f9bb6964637d4aa1258c2af17563d6 | https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/flask/ext.py#L87-L107 | train |
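A sketch of wiring the Flask extension whose init_app is documented above. The APPINSIGHTS_INSTRUMENTATIONKEY setting name and the AppInsights(app) constructor shortcut are assumptions (the record only shows the CONF_KEY constant and init_app itself); the key value is a placeholder.

from flask import Flask
from applicationinsights.flask.ext import AppInsights

app = Flask(__name__)
app.config['APPINSIGHTS_INSTRUMENTATIONKEY'] = '00000000-0000-0000-0000-000000000000'  # setting name assumed

appinsights = AppInsights(app)          # assumed to call init_app(app), the usual Flask extension pattern

@app.route('/')
def index():
    app.logger.info('handled index')    # routed through the trace log handler set up by init_app
    return 'ok'

@app.after_request
def push_telemetry(response):
    appinsights.flush()                 # send queued telemetry after each request
    return response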
Microsoft/ApplicationInsights-Python | applicationinsights/flask/ext.py | AppInsights._init_request_logging | def _init_request_logging(self, app):
"""
Sets up request logging unless ``APPINSIGHTS_DISABLE_REQUEST_LOGGING``
is set in the Flask config.
Args:
app (flask.Flask). the Flask application for which to initialize the extension.
"""
enabled = not app.config.get(CONF_DISABLE_REQUEST_LOGGING, False)
if not enabled:
return
self._requests_middleware = WSGIApplication(
self._key, app.wsgi_app, telemetry_channel=self._channel)
app.wsgi_app = self._requests_middleware | python | def _init_request_logging(self, app):
"""
Sets up request logging unless ``APPINSIGHTS_DISABLE_REQUEST_LOGGING``
is set in the Flask config.
Args:
app (flask.Flask). the Flask application for which to initialize the extension.
"""
enabled = not app.config.get(CONF_DISABLE_REQUEST_LOGGING, False)
if not enabled:
return
self._requests_middleware = WSGIApplication(
self._key, app.wsgi_app, telemetry_channel=self._channel)
app.wsgi_app = self._requests_middleware | [
"def",
"_init_request_logging",
"(",
"self",
",",
"app",
")",
":",
"enabled",
"=",
"not",
"app",
".",
"config",
".",
"get",
"(",
"CONF_DISABLE_REQUEST_LOGGING",
",",
"False",
")",
"if",
"not",
"enabled",
":",
"return",
"self",
".",
"_requests_middleware",
"=",
"WSGIApplication",
"(",
"self",
".",
"_key",
",",
"app",
".",
"wsgi_app",
",",
"telemetry_channel",
"=",
"self",
".",
"_channel",
")",
"app",
".",
"wsgi_app",
"=",
"self",
".",
"_requests_middleware"
] | Sets up request logging unless ``APPINSIGHTS_DISABLE_REQUEST_LOGGING``
is set in the Flask config.
Args:
app (flask.Flask). the Flask application for which to initialize the extension. | [
"Sets",
"up",
"request",
"logging",
"unless",
"APPINSIGHTS_DISABLE_REQUEST_LOGGING",
"is",
"set",
"in",
"the",
"Flask",
"config",
"."
] | 8452ab7126f9bb6964637d4aa1258c2af17563d6 | https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/flask/ext.py#L119-L135 | train |
Microsoft/ApplicationInsights-Python | applicationinsights/flask/ext.py | AppInsights._init_trace_logging | def _init_trace_logging(self, app):
"""
Sets up trace logging unless ``APPINSIGHTS_DISABLE_TRACE_LOGGING`` is
set in the Flask config.
Args:
app (flask.Flask). the Flask application for which to initialize the extension.
"""
enabled = not app.config.get(CONF_DISABLE_TRACE_LOGGING, False)
if not enabled:
return
self._trace_log_handler = LoggingHandler(
self._key, telemetry_channel=self._channel)
app.logger.addHandler(self._trace_log_handler) | python | def _init_trace_logging(self, app):
"""
Sets up trace logging unless ``APPINSIGHTS_DISABLE_TRACE_LOGGING`` is
set in the Flask config.
Args:
app (flask.Flask). the Flask application for which to initialize the extension.
"""
enabled = not app.config.get(CONF_DISABLE_TRACE_LOGGING, False)
if not enabled:
return
self._trace_log_handler = LoggingHandler(
self._key, telemetry_channel=self._channel)
app.logger.addHandler(self._trace_log_handler) | [
"def",
"_init_trace_logging",
"(",
"self",
",",
"app",
")",
":",
"enabled",
"=",
"not",
"app",
".",
"config",
".",
"get",
"(",
"CONF_DISABLE_TRACE_LOGGING",
",",
"False",
")",
"if",
"not",
"enabled",
":",
"return",
"self",
".",
"_trace_log_handler",
"=",
"LoggingHandler",
"(",
"self",
".",
"_key",
",",
"telemetry_channel",
"=",
"self",
".",
"_channel",
")",
"app",
".",
"logger",
".",
"addHandler",
"(",
"self",
".",
"_trace_log_handler",
")"
] | Sets up trace logging unless ``APPINSIGHTS_DISABLE_TRACE_LOGGING`` is
set in the Flask config.
Args:
app (flask.Flask). the Flask application for which to initialize the extension. | [
"Sets",
"up",
"trace",
"logging",
"unless",
"APPINSIGHTS_DISABLE_TRACE_LOGGING",
"is",
"set",
"in",
"the",
"Flask",
"config",
"."
] | 8452ab7126f9bb6964637d4aa1258c2af17563d6 | https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/flask/ext.py#L137-L153 | train |
Microsoft/ApplicationInsights-Python | applicationinsights/flask/ext.py | AppInsights._init_exception_logging | def _init_exception_logging(self, app):
"""
Sets up exception logging unless ``APPINSIGHTS_DISABLE_EXCEPTION_LOGGING``
is set in the Flask config.
Args:
app (flask.Flask). the Flask application for which to initialize the extension.
"""
enabled = not app.config.get(CONF_DISABLE_EXCEPTION_LOGGING, False)
if not enabled:
return
exception_telemetry_client = TelemetryClient(
self._key, telemetry_channel=self._channel)
@app.errorhandler(Exception)
def exception_handler(exception):
if HTTPException and isinstance(exception, HTTPException):
return exception
try:
raise exception
except Exception:
exception_telemetry_client.track_exception()
finally:
raise exception
self._exception_telemetry_client = exception_telemetry_client | python | def _init_exception_logging(self, app):
"""
Sets up exception logging unless ``APPINSIGHTS_DISABLE_EXCEPTION_LOGGING``
is set in the Flask config.
Args:
app (flask.Flask). the Flask application for which to initialize the extension.
"""
enabled = not app.config.get(CONF_DISABLE_EXCEPTION_LOGGING, False)
if not enabled:
return
exception_telemetry_client = TelemetryClient(
self._key, telemetry_channel=self._channel)
@app.errorhandler(Exception)
def exception_handler(exception):
if HTTPException and isinstance(exception, HTTPException):
return exception
try:
raise exception
except Exception:
exception_telemetry_client.track_exception()
finally:
raise exception
self._exception_telemetry_client = exception_telemetry_client | [
"def",
"_init_exception_logging",
"(",
"self",
",",
"app",
")",
":",
"enabled",
"=",
"not",
"app",
".",
"config",
".",
"get",
"(",
"CONF_DISABLE_EXCEPTION_LOGGING",
",",
"False",
")",
"if",
"not",
"enabled",
":",
"return",
"exception_telemetry_client",
"=",
"TelemetryClient",
"(",
"self",
".",
"_key",
",",
"telemetry_channel",
"=",
"self",
".",
"_channel",
")",
"@",
"app",
".",
"errorhandler",
"(",
"Exception",
")",
"def",
"exception_handler",
"(",
"exception",
")",
":",
"if",
"HTTPException",
"and",
"isinstance",
"(",
"exception",
",",
"HTTPException",
")",
":",
"return",
"exception",
"try",
":",
"raise",
"exception",
"except",
"Exception",
":",
"exception_telemetry_client",
".",
"track_exception",
"(",
")",
"finally",
":",
"raise",
"exception",
"self",
".",
"_exception_telemetry_client",
"=",
"exception_telemetry_client"
] | Sets up exception logging unless ``APPINSIGHTS_DISABLE_EXCEPTION_LOGGING``
is set in the Flask config.
Args:
app (flask.Flask). the Flask application for which to initialize the extension. | [
"Sets",
"up",
"exception",
"logging",
"unless",
"APPINSIGHTS_DISABLE_EXCEPTION_LOGGING",
"is",
"set",
"in",
"the",
"Flask",
"config",
"."
] | 8452ab7126f9bb6964637d4aa1258c2af17563d6 | https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/flask/ext.py#L155-L183 | train |
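The three _init_* helpers above are toggled per feature through Flask configuration; a short sketch using the setting names quoted in their docstrings (the instrumentation-key setting name is again an assumption, its value a placeholder).

from flask import Flask
from applicationinsights.flask.ext import AppInsights

app = Flask(__name__)
app.config['APPINSIGHTS_INSTRUMENTATIONKEY'] = '00000000-0000-0000-0000-000000000000'  # name assumed
app.config['APPINSIGHTS_DISABLE_REQUEST_LOGGING'] = False     # keep WSGI request telemetry
app.config['APPINSIGHTS_DISABLE_TRACE_LOGGING'] = False       # keep app.logger traces
app.config['APPINSIGHTS_DISABLE_EXCEPTION_LOGGING'] = True    # skip unhandled-exception telemetry
appinsights = AppInsights(app)                                # constructor shortcut assumed, as above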
Microsoft/ApplicationInsights-Python | applicationinsights/flask/ext.py | AppInsights.flush | def flush(self):
"""Flushes the queued up telemetry to the service.
"""
if self._requests_middleware:
self._requests_middleware.flush()
if self._trace_log_handler:
self._trace_log_handler.flush()
if self._exception_telemetry_client:
self._exception_telemetry_client.flush() | python | def flush(self):
"""Flushes the queued up telemetry to the service.
"""
if self._requests_middleware:
self._requests_middleware.flush()
if self._trace_log_handler:
self._trace_log_handler.flush()
if self._exception_telemetry_client:
self._exception_telemetry_client.flush() | [
"def",
"flush",
"(",
"self",
")",
":",
"if",
"self",
".",
"_requests_middleware",
":",
"self",
".",
"_requests_middleware",
".",
"flush",
"(",
")",
"if",
"self",
".",
"_trace_log_handler",
":",
"self",
".",
"_trace_log_handler",
".",
"flush",
"(",
")",
"if",
"self",
".",
"_exception_telemetry_client",
":",
"self",
".",
"_exception_telemetry_client",
".",
"flush",
"(",
")"
] | Flushes the queued up telemetry to the service. | [
"Flushes",
"the",
"queued",
"up",
"telemetry",
"to",
"the",
"service",
"."
] | 8452ab7126f9bb6964637d4aa1258c2af17563d6 | https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/flask/ext.py#L185-L195 | train |
Microsoft/ApplicationInsights-Python | applicationinsights/channel/QueueBase.py | QueueBase.get | def get(self):
"""Gets a single item from the queue and returns it. If the queue is empty, this method will return None.
Returns:
:class:`contracts.Envelope`. a telemetry envelope object or None if the queue is empty.
"""
try:
item = self._queue.get_nowait()
except (Empty, PersistEmpty):
return None
if self._persistence_path:
self._queue.task_done()
return item | python | def get(self):
"""Gets a single item from the queue and returns it. If the queue is empty, this method will return None.
Returns:
:class:`contracts.Envelope`. a telemetry envelope object or None if the queue is empty.
"""
try:
item = self._queue.get_nowait()
except (Empty, PersistEmpty):
return None
if self._persistence_path:
self._queue.task_done()
return item | [
"def",
"get",
"(",
"self",
")",
":",
"try",
":",
"item",
"=",
"self",
".",
"_queue",
".",
"get_nowait",
"(",
")",
"except",
"(",
"Empty",
",",
"PersistEmpty",
")",
":",
"return",
"None",
"if",
"self",
".",
"_persistence_path",
":",
"self",
".",
"_queue",
".",
"task_done",
"(",
")",
"return",
"item"
] | Gets a single item from the queue and returns it. If the queue is empty, this method will return None.
Returns:
:class:`contracts.Envelope`. a telemetry envelope object or None if the queue is empty. | [
"Gets",
"a",
"single",
"item",
"from",
"the",
"queue",
"and",
"returns",
"it",
".",
"If",
"the",
"queue",
"is",
"empty",
"this",
"method",
"will",
"return",
"None",
"."
] | 8452ab7126f9bb6964637d4aa1258c2af17563d6 | https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/channel/QueueBase.py#L92-L106 | train |
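A small sketch of the get() contract described above, using the synchronous queue from the same channel package; put() is taken from the rest of the QueueBase API and does not appear in this record.

from applicationinsights.channel import NullSender, SynchronousQueue, contracts

queue = SynchronousQueue(NullSender())
queue.put(contracts.Envelope())     # put() assumed from QueueBase (not shown in this record)

drained = 0
while True:
    envelope = queue.get()
    if envelope is None:            # empty queue: get() returns None instead of blocking
        break
    drained += 1
print(drained)                      # expected: 1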
Microsoft/ApplicationInsights-Python | applicationinsights/logging/LoggingHandler.py | enable | def enable(instrumentation_key, *args, **kwargs):
"""Enables the Application Insights logging handler for the root logger for the supplied instrumentation key.
Multiple calls to this function with different instrumentation keys result in multiple handler instances.
.. code:: python
import logging
from applicationinsights.logging import enable
# set up logging
enable('<YOUR INSTRUMENTATION KEY GOES HERE>')
# log something (this will be sent to the Application Insights service as a trace)
logging.info('This is a message')
# logging shutdown will cause a flush of all un-sent telemetry items
# alternatively set up an async channel via enable('<YOUR INSTRUMENTATION KEY GOES HERE>', async_=True)
Args:
instrumentation_key (str). the instrumentation key to use while sending telemetry to the service.
Keyword Args:
async_ (bool): Whether to use an async channel for the telemetry. Defaults to False.
endpoint (str): The custom endpoint to which to send the telemetry. Defaults to None.
level (Union[int, str]): The level to set for the logger. Defaults to INFO.
Returns:
:class:`ApplicationInsightsHandler`. the newly created or existing handler.
"""
if not instrumentation_key:
raise Exception('Instrumentation key was required but not provided')
if instrumentation_key in enabled_instrumentation_keys:
logging.getLogger().removeHandler(enabled_instrumentation_keys[instrumentation_key])
async_ = kwargs.pop('async_', False)
endpoint = kwargs.pop('endpoint', None)
telemetry_channel = kwargs.get('telemetry_channel')
if telemetry_channel and async_:
raise Exception('Incompatible arguments async_ and telemetry_channel')
if telemetry_channel and endpoint:
raise Exception('Incompatible arguments endpoint and telemetry_channel')
if not telemetry_channel:
if async_:
sender, queue = AsynchronousSender, AsynchronousQueue
else:
sender, queue = SynchronousSender, SynchronousQueue
kwargs['telemetry_channel'] = TelemetryChannel(queue=queue(sender(endpoint)))
log_level = kwargs.pop('level', logging.INFO)
handler = LoggingHandler(instrumentation_key, *args, **kwargs)
handler.setLevel(log_level)
enabled_instrumentation_keys[instrumentation_key] = handler
logging.getLogger().addHandler(handler)
return handler | python | def enable(instrumentation_key, *args, **kwargs):
"""Enables the Application Insights logging handler for the root logger for the supplied instrumentation key.
Multiple calls to this function with different instrumentation keys result in multiple handler instances.
.. code:: python
import logging
from applicationinsights.logging import enable
# set up logging
enable('<YOUR INSTRUMENTATION KEY GOES HERE>')
# log something (this will be sent to the Application Insights service as a trace)
logging.info('This is a message')
# logging shutdown will cause a flush of all un-sent telemetry items
# alternatively set up an async channel via enable('<YOUR INSTRUMENTATION KEY GOES HERE>', async_=True)
Args:
instrumentation_key (str). the instrumentation key to use while sending telemetry to the service.
Keyword Args:
async_ (bool): Whether to use an async channel for the telemetry. Defaults to False.
endpoint (str): The custom endpoint to which to send the telemetry. Defaults to None.
level (Union[int, str]): The level to set for the logger. Defaults to INFO.
Returns:
:class:`ApplicationInsightsHandler`. the newly created or existing handler.
"""
if not instrumentation_key:
raise Exception('Instrumentation key was required but not provided')
if instrumentation_key in enabled_instrumentation_keys:
logging.getLogger().removeHandler(enabled_instrumentation_keys[instrumentation_key])
async_ = kwargs.pop('async_', False)
endpoint = kwargs.pop('endpoint', None)
telemetry_channel = kwargs.get('telemetry_channel')
if telemetry_channel and async_:
raise Exception('Incompatible arguments async_ and telemetry_channel')
if telemetry_channel and endpoint:
raise Exception('Incompatible arguments endpoint and telemetry_channel')
if not telemetry_channel:
if async_:
sender, queue = AsynchronousSender, AsynchronousQueue
else:
sender, queue = SynchronousSender, SynchronousQueue
kwargs['telemetry_channel'] = TelemetryChannel(queue=queue(sender(endpoint)))
log_level = kwargs.pop('level', logging.INFO)
handler = LoggingHandler(instrumentation_key, *args, **kwargs)
handler.setLevel(log_level)
enabled_instrumentation_keys[instrumentation_key] = handler
logging.getLogger().addHandler(handler)
return handler | [
"def",
"enable",
"(",
"instrumentation_key",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"instrumentation_key",
":",
"raise",
"Exception",
"(",
"'Instrumentation key was required but not provided'",
")",
"if",
"instrumentation_key",
"in",
"enabled_instrumentation_keys",
":",
"logging",
".",
"getLogger",
"(",
")",
".",
"removeHandler",
"(",
"enabled_instrumentation_keys",
"[",
"instrumentation_key",
"]",
")",
"async_",
"=",
"kwargs",
".",
"pop",
"(",
"'async_'",
",",
"False",
")",
"endpoint",
"=",
"kwargs",
".",
"pop",
"(",
"'endpoint'",
",",
"None",
")",
"telemetry_channel",
"=",
"kwargs",
".",
"get",
"(",
"'telemetry_channel'",
")",
"if",
"telemetry_channel",
"and",
"async_",
":",
"raise",
"Exception",
"(",
"'Incompatible arguments async_ and telemetry_channel'",
")",
"if",
"telemetry_channel",
"and",
"endpoint",
":",
"raise",
"Exception",
"(",
"'Incompatible arguments endpoint and telemetry_channel'",
")",
"if",
"not",
"telemetry_channel",
":",
"if",
"async_",
":",
"sender",
",",
"queue",
"=",
"AsynchronousSender",
",",
"AsynchronousQueue",
"else",
":",
"sender",
",",
"queue",
"=",
"SynchronousSender",
",",
"SynchronousQueue",
"kwargs",
"[",
"'telemetry_channel'",
"]",
"=",
"TelemetryChannel",
"(",
"queue",
"=",
"queue",
"(",
"sender",
"(",
"endpoint",
")",
")",
")",
"log_level",
"=",
"kwargs",
".",
"pop",
"(",
"'level'",
",",
"logging",
".",
"INFO",
")",
"handler",
"=",
"LoggingHandler",
"(",
"instrumentation_key",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"handler",
".",
"setLevel",
"(",
"log_level",
")",
"enabled_instrumentation_keys",
"[",
"instrumentation_key",
"]",
"=",
"handler",
"logging",
".",
"getLogger",
"(",
")",
".",
"addHandler",
"(",
"handler",
")",
"return",
"handler"
] | Enables the Application Insights logging handler for the root logger for the supplied instrumentation key.
Multiple calls to this function with different instrumentation keys result in multiple handler instances.
.. code:: python
import logging
from applicationinsights.logging import enable
# set up logging
enable('<YOUR INSTRUMENTATION KEY GOES HERE>')
# log something (this will be sent to the Application Insights service as a trace)
logging.info('This is a message')
# logging shutdown will cause a flush of all un-sent telemetry items
# alternatively set up an async channel via enable('<YOUR INSTRUMENTATION KEY GOES HERE>', async_=True)
Args:
instrumentation_key (str). the instrumentation key to use while sending telemetry to the service.
Keyword Args:
async_ (bool): Whether to use an async channel for the telemetry. Defaults to False.
endpoint (str): The custom endpoint to which to send the telemetry. Defaults to None.
level (Union[int, str]): The level to set for the logger. Defaults to INFO.
Returns:
:class:`ApplicationInsightsHandler`. the newly created or existing handler. | [
"Enables",
"the",
"Application",
"Insights",
"logging",
"handler",
"for",
"the",
"root",
"logger",
"for",
"the",
"supplied",
"instrumentation",
"key",
".",
"Multiple",
"calls",
"to",
"this",
"function",
"with",
"different",
"instrumentation",
"keys",
"result",
"in",
"multiple",
"handler",
"instances",
"."
] | 8452ab7126f9bb6964637d4aa1258c2af17563d6 | https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/logging/LoggingHandler.py#L10-L61 | train |
Microsoft/ApplicationInsights-Python | applicationinsights/channel/AsynchronousSender.py | AsynchronousSender.start | def start(self):
"""Starts a new sender thread if none is not already there
"""
with self._lock_send_remaining_time:
if self._send_remaining_time <= 0.0:
local_send_interval = self._send_interval
if self._send_interval < 0.1:
local_send_interval = 0.1
self._send_remaining_time = self._send_time
if self._send_remaining_time < local_send_interval:
self._send_remaining_time = local_send_interval
thread = Thread(target=self._run)
thread.daemon = True
thread.start() | python | def start(self):
"""Starts a new sender thread if none is not already there
"""
with self._lock_send_remaining_time:
if self._send_remaining_time <= 0.0:
local_send_interval = self._send_interval
if self._send_interval < 0.1:
local_send_interval = 0.1
self._send_remaining_time = self._send_time
if self._send_remaining_time < local_send_interval:
self._send_remaining_time = local_send_interval
thread = Thread(target=self._run)
thread.daemon = True
thread.start() | [
"def",
"start",
"(",
"self",
")",
":",
"with",
"self",
".",
"_lock_send_remaining_time",
":",
"if",
"self",
".",
"_send_remaining_time",
"<=",
"0.0",
":",
"local_send_interval",
"=",
"self",
".",
"_send_interval",
"if",
"self",
".",
"_send_interval",
"<",
"0.1",
":",
"local_send_interval",
"=",
"0.1",
"self",
".",
"_send_remaining_time",
"=",
"self",
".",
"_send_time",
"if",
"self",
".",
"_send_remaining_time",
"<",
"local_send_interval",
":",
"self",
".",
"_send_remaining_time",
"=",
"local_send_interval",
"thread",
"=",
"Thread",
"(",
"target",
"=",
"self",
".",
"_run",
")",
"thread",
".",
"daemon",
"=",
"True",
"thread",
".",
"start",
"(",
")"
] | Starts a new sender thread if one is not already there | [
"Starts",
"a",
"new",
"sender",
"thread",
"if",
"none",
"is",
"not",
"already",
"there"
] | 8452ab7126f9bb6964637d4aa1258c2af17563d6 | https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/channel/AsynchronousSender.py#L76-L89 | train |
Microsoft/ApplicationInsights-Python | applicationinsights/channel/TelemetryContext.py | device_initialize | def device_initialize(self):
""" The device initializer used to assign special properties to all device context objects"""
existing_device_initialize(self)
self.type = 'Other'
self.id = platform.node()
self.os_version = platform.version()
self.locale = locale.getdefaultlocale()[0] | python | def device_initialize(self):
""" The device initializer used to assign special properties to all device context objects"""
existing_device_initialize(self)
self.type = 'Other'
self.id = platform.node()
self.os_version = platform.version()
self.locale = locale.getdefaultlocale()[0] | [
"def",
"device_initialize",
"(",
"self",
")",
":",
"existing_device_initialize",
"(",
"self",
")",
"self",
".",
"type",
"=",
"'Other'",
"self",
".",
"id",
"=",
"platform",
".",
"node",
"(",
")",
"self",
".",
"os_version",
"=",
"platform",
".",
"version",
"(",
")",
"self",
".",
"locale",
"=",
"locale",
".",
"getdefaultlocale",
"(",
")",
"[",
"0",
"]"
] | The device initializer used to assign special properties to all device context objects | [
"The",
"device",
"initializer",
"used",
"to",
"assign",
"special",
"properties",
"to",
"all",
"device",
"context",
"objects"
] | 8452ab7126f9bb6964637d4aa1258c2af17563d6 | https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/channel/TelemetryContext.py#L8-L14 | train |
hyperledger/indy-crypto | wrappers/python/indy_crypto/bls.py | Bls.sign | def sign(message: bytes, sign_key: SignKey) -> Signature:
"""
Signs the message and returns signature.
:param: message - Message to sign
:param: sign_key - Sign key
:return: Signature
"""
logger = logging.getLogger(__name__)
logger.debug("Bls::sign: >>> message: %r, sign_key: %r", message, sign_key)
c_instance = c_void_p()
do_call('indy_crypto_bls_sign',
message, len(message),
sign_key.c_instance,
byref(c_instance))
res = Signature(c_instance)
logger.debug("Bls::sign: <<< res: %r", res)
return res | python | def sign(message: bytes, sign_key: SignKey) -> Signature:
"""
Signs the message and returns signature.
:param: message - Message to sign
:param: sign_key - Sign key
:return: Signature
"""
logger = logging.getLogger(__name__)
logger.debug("Bls::sign: >>> message: %r, sign_key: %r", message, sign_key)
c_instance = c_void_p()
do_call('indy_crypto_bls_sign',
message, len(message),
sign_key.c_instance,
byref(c_instance))
res = Signature(c_instance)
logger.debug("Bls::sign: <<< res: %r", res)
return res | [
"def",
"sign",
"(",
"message",
":",
"bytes",
",",
"sign_key",
":",
"SignKey",
")",
"->",
"Signature",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"\"Bls::sign: >>> message: %r, sign_key: %r\"",
",",
"message",
",",
"sign_key",
")",
"c_instance",
"=",
"c_void_p",
"(",
")",
"do_call",
"(",
"'indy_crypto_bls_sign'",
",",
"message",
",",
"len",
"(",
"message",
")",
",",
"sign_key",
".",
"c_instance",
",",
"byref",
"(",
"c_instance",
")",
")",
"res",
"=",
"Signature",
"(",
"c_instance",
")",
"logger",
".",
"debug",
"(",
"\"Bls::sign: <<< res: %r\"",
",",
"res",
")",
"return",
"res"
] | Signs the message and returns signature.
:param: message - Message to sign
:param: sign_key - Sign key
:return: Signature | [
"Signs",
"the",
"message",
"and",
"returns",
"signature",
"."
] | 1675e29a2a5949b44899553d3d128335cf7a61b3 | https://github.com/hyperledger/indy-crypto/blob/1675e29a2a5949b44899553d3d128335cf7a61b3/wrappers/python/indy_crypto/bls.py#L229-L250 | train |
hyperledger/indy-crypto | wrappers/python/indy_crypto/bls.py | Bls.verify | def verify(signature: Signature, message: bytes, ver_key: VerKey, gen: Generator) -> bool:
"""
Verifies the message signature and returns true - if signature valid or false otherwise.
:param: signature - Signature to verify
:param: message - Message to verify
:param: ver_key - Verification key
:param: gen - Generator point
:return: true if signature valid
"""
logger = logging.getLogger(__name__)
logger.debug("Bls::verify: >>> signature: %r, message: %r, ver_key: %r, gen: %r", signature, message, ver_key,
gen)
valid = c_bool()
do_call('indy_crypto_bsl_verify',
signature.c_instance,
message, len(message),
ver_key.c_instance,
gen.c_instance,
byref(valid))
res = valid
logger.debug("Bls::verify: <<< res: %r", res)
return res | python | def verify(signature: Signature, message: bytes, ver_key: VerKey, gen: Generator) -> bool:
"""
Verifies the message signature and returns true - if signature valid or false otherwise.
:param: signature - Signature to verify
:param: message - Message to verify
:param: ver_key - Verification key
:param: gen - Generator point
:return: true if signature valid
"""
logger = logging.getLogger(__name__)
logger.debug("Bls::verify: >>> signature: %r, message: %r, ver_key: %r, gen: %r", signature, message, ver_key,
gen)
valid = c_bool()
do_call('indy_crypto_bsl_verify',
signature.c_instance,
message, len(message),
ver_key.c_instance,
gen.c_instance,
byref(valid))
res = valid
logger.debug("Bls::verify: <<< res: %r", res)
return res | [
"def",
"verify",
"(",
"signature",
":",
"Signature",
",",
"message",
":",
"bytes",
",",
"ver_key",
":",
"VerKey",
",",
"gen",
":",
"Generator",
")",
"->",
"bool",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"\"Bls::verify: >>> signature: %r, message: %r, ver_key: %r, gen: %r\"",
",",
"signature",
",",
"message",
",",
"ver_key",
",",
"gen",
")",
"valid",
"=",
"c_bool",
"(",
")",
"do_call",
"(",
"'indy_crypto_bsl_verify'",
",",
"signature",
".",
"c_instance",
",",
"message",
",",
"len",
"(",
"message",
")",
",",
"ver_key",
".",
"c_instance",
",",
"gen",
".",
"c_instance",
",",
"byref",
"(",
"valid",
")",
")",
"res",
"=",
"valid",
"logger",
".",
"debug",
"(",
"\"Bls::verify: <<< res: %r\"",
",",
"res",
")",
"return",
"res"
] | Verifies the message signature and returns true - if signature valid or false otherwise.
:param: signature - Signature to verify
:param: message - Message to verify
:param: ver_key - Verification key
:param: gen - Generator point
:return: true if signature valid | [
"Verifies",
"the",
"message",
"signature",
"and",
"returns",
"true",
"-",
"if",
"signature",
"valid",
"or",
"false",
"otherwise",
"."
] | 1675e29a2a5949b44899553d3d128335cf7a61b3 | https://github.com/hyperledger/indy-crypto/blob/1675e29a2a5949b44899553d3d128335cf7a61b3/wrappers/python/indy_crypto/bls.py#L253-L278 | train |
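A sketch that chains the sign and verify records above; it needs the native libindy_crypto library, and Generator.new(), SignKey.new() and VerKey.new() are assumed from the rest of this wrapper module (they do not appear in these records). verify() returns a ctypes boolean here, hence the bool() conversion.

from indy_crypto.bls import Bls, Generator, SignKey, VerKey

gen = Generator.new()                  # assumed constructor from the same module
sign_key = SignKey.new(None)           # None is assumed to request a random seed
ver_key = VerKey.new(gen, sign_key)    # assumed constructor from the same module

message = b'important payload'
signature = Bls.sign(message, sign_key)

print(bool(Bls.verify(signature, message, ver_key, gen)))      # expected: True
print(bool(Bls.verify(signature, b'tampered', ver_key, gen)))  # expected: False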
hyperledger/indy-crypto | wrappers/python/indy_crypto/bls.py | Bls.verify_pop | def verify_pop(pop: ProofOfPossession, ver_key: VerKey, gen: Generator) -> bool:
"""
Verifies the proof of possession and returns true - if signature valid or false otherwise.
:param: pop - Proof of possession
:param: ver_key - Verification key
:param: gen - Generator point
:return: true if signature valid
"""
logger = logging.getLogger(__name__)
logger.debug("Bls::verify_pop: >>> pop: %r, ver_key: %r, gen: %r",
pop,
ver_key,
gen)
valid = c_bool()
do_call('indy_crypto_bsl_verify_pop',
pop.c_instance,
ver_key.c_instance,
gen.c_instance,
byref(valid))
res = valid
logger.debug("Bls::verify_pop: <<< res: %r", res)
return res | python | def verify_pop(pop: ProofOfPossession, ver_key: VerKey, gen: Generator) -> bool:
"""
Verifies the proof of possession and returns true - if signature valid or false otherwise.
:param: pop - Proof of possession
:param: ver_key - Verification key
:param: gen - Generator point
:return: true if signature valid
"""
logger = logging.getLogger(__name__)
logger.debug("Bls::verify_pop: >>> pop: %r, ver_key: %r, gen: %r",
pop,
ver_key,
gen)
valid = c_bool()
do_call('indy_crypto_bsl_verify_pop',
pop.c_instance,
ver_key.c_instance,
gen.c_instance,
byref(valid))
res = valid
logger.debug("Bls::verify_pop: <<< res: %r", res)
return res | [
"def",
"verify_pop",
"(",
"pop",
":",
"ProofOfPossession",
",",
"ver_key",
":",
"VerKey",
",",
"gen",
":",
"Generator",
")",
"->",
"bool",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"\"Bls::verify_pop: >>> pop: %r, ver_key: %r, gen: %r\"",
",",
"pop",
",",
"ver_key",
",",
"gen",
")",
"valid",
"=",
"c_bool",
"(",
")",
"do_call",
"(",
"'indy_crypto_bsl_verify_pop'",
",",
"pop",
".",
"c_instance",
",",
"ver_key",
".",
"c_instance",
",",
"gen",
".",
"c_instance",
",",
"byref",
"(",
"valid",
")",
")",
"res",
"=",
"valid",
"logger",
".",
"debug",
"(",
"\"Bls::verify_pop: <<< res: %r\"",
",",
"res",
")",
"return",
"res"
] | Verifies the proof of possession and returns true - if signature valid or false otherwise.
:param: pop - Proof of possession
:param: ver_key - Verification key
:param: gen - Generator point
:return: true if signature valid | [
"Verifies",
"the",
"proof",
"of",
"possession",
"and",
"returns",
"true",
"-",
"if",
"signature",
"valid",
"or",
"false",
"otherwise",
"."
] | 1675e29a2a5949b44899553d3d128335cf7a61b3 | https://github.com/hyperledger/indy-crypto/blob/1675e29a2a5949b44899553d3d128335cf7a61b3/wrappers/python/indy_crypto/bls.py#L281-L306 | train |
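A companion sketch for verify_pop, with the same caveats as above; ProofOfPossession.new() is assumed from the same wrapper module.

from indy_crypto.bls import Bls, Generator, ProofOfPossession, SignKey, VerKey

gen = Generator.new()
sign_key = SignKey.new(None)
ver_key = VerKey.new(gen, sign_key)

pop = ProofOfPossession.new(ver_key, sign_key)   # assumed constructor (not in this record)
print(bool(Bls.verify_pop(pop, ver_key, gen)))   # expected: True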
hyperledger/indy-crypto | wrappers/python/indy_crypto/bls.py | Bls.verify_multi_sig | def verify_multi_sig(multi_sig: MultiSignature, message: bytes, ver_keys: [VerKey], gen: Generator) -> bool:
"""
Verifies the message multi signature and returns true - if signature valid or false otherwise.
:param: multi_sig - Multi signature to verify
:param: message - Message to verify
:param: ver_keys - List of verification keys
:param: gen - Generator point
:return: true if multi signature valid.
"""
logger = logging.getLogger(__name__)
logger.debug("Bls::verify_multi_sig: >>> multi_sig: %r, message: %r, ver_keys: %r, gen: %r",
multi_sig, message, ver_keys, gen)
# noinspection PyCallingNonCallable,PyTypeChecker
ver_key_c_instances = (c_void_p * len(ver_keys))()
for i in range(len(ver_keys)):
ver_key_c_instances[i] = ver_keys[i].c_instance
valid = c_bool()
do_call('indy_crypto_bls_verify_multi_sig',
multi_sig.c_instance,
message, len(message),
ver_key_c_instances, len(ver_keys),
gen.c_instance,
byref(valid))
res = valid
logger.debug("Bls::verify_multi_sig: <<< res: %r", res)
return res | python | def verify_multi_sig(multi_sig: MultiSignature, message: bytes, ver_keys: [VerKey], gen: Generator) -> bool:
"""
Verifies the message multi signature and returns true - if signature valid or false otherwise.
:param: multi_sig - Multi signature to verify
:param: message - Message to verify
:param: ver_keys - List of verification keys
:param: gen - Generator point
:return: true if multi signature valid.
"""
logger = logging.getLogger(__name__)
logger.debug("Bls::verify_multi_sig: >>> multi_sig: %r, message: %r, ver_keys: %r, gen: %r",
multi_sig, message, ver_keys, gen)
# noinspection PyCallingNonCallable,PyTypeChecker
ver_key_c_instances = (c_void_p * len(ver_keys))()
for i in range(len(ver_keys)):
ver_key_c_instances[i] = ver_keys[i].c_instance
valid = c_bool()
do_call('indy_crypto_bls_verify_multi_sig',
multi_sig.c_instance,
message, len(message),
ver_key_c_instances, len(ver_keys),
gen.c_instance,
byref(valid))
res = valid
logger.debug("Bls::verify_multi_sig: <<< res: %r", res)
return res | [
"def",
"verify_multi_sig",
"(",
"multi_sig",
":",
"MultiSignature",
",",
"message",
":",
"bytes",
",",
"ver_keys",
":",
"[",
"VerKey",
"]",
",",
"gen",
":",
"Generator",
")",
"->",
"bool",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"\"Bls::verify_multi_sig: >>> multi_sig: %r, message: %r, ver_keys: %r, gen: %r\"",
",",
"multi_sig",
",",
"message",
",",
"ver_keys",
",",
"gen",
")",
"# noinspection PyCallingNonCallable,PyTypeChecker",
"ver_key_c_instances",
"=",
"(",
"c_void_p",
"*",
"len",
"(",
"ver_keys",
")",
")",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"ver_keys",
")",
")",
":",
"ver_key_c_instances",
"[",
"i",
"]",
"=",
"ver_keys",
"[",
"i",
"]",
".",
"c_instance",
"valid",
"=",
"c_bool",
"(",
")",
"do_call",
"(",
"'indy_crypto_bls_verify_multi_sig'",
",",
"multi_sig",
".",
"c_instance",
",",
"message",
",",
"len",
"(",
"message",
")",
",",
"ver_key_c_instances",
",",
"len",
"(",
"ver_keys",
")",
",",
"gen",
".",
"c_instance",
",",
"byref",
"(",
"valid",
")",
")",
"res",
"=",
"valid",
"logger",
".",
"debug",
"(",
"\"Bls::verify_multi_sig: <<< res: %r\"",
",",
"res",
")",
"return",
"res"
] | Verifies the message multi signature and returns true - if signature valid or false otherwise.
:param: multi_sig - Multi signature to verify
:param: message - Message to verify
:param: ver_keys - List of verification keys
:param: gen - Generator point
:return: true if multi signature valid. | [
"Verifies",
"the",
"message",
"multi",
"signature",
"and",
"returns",
"true",
"-",
"if",
"signature",
"valid",
"or",
"false",
"otherwise",
"."
] | 1675e29a2a5949b44899553d3d128335cf7a61b3 | https://github.com/hyperledger/indy-crypto/blob/1675e29a2a5949b44899553d3d128335cf7a61b3/wrappers/python/indy_crypto/bls.py#L309-L340 | train |
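A sketch for the multi-signature path, again hedged: MultiSignature.new() is assumed from the same wrapper module, and the native library must be installed.

from indy_crypto.bls import Bls, Generator, MultiSignature, SignKey, VerKey

gen = Generator.new()
message = b'block 42'

sign_keys = [SignKey.new(None) for _ in range(2)]            # two independent signers
ver_keys = [VerKey.new(gen, sk) for sk in sign_keys]
signatures = [Bls.sign(message, sk) for sk in sign_keys]

multi_sig = MultiSignature.new(signatures)                   # assumed constructor (not in this record)
print(bool(Bls.verify_multi_sig(multi_sig, message, ver_keys, gen)))   # expected: True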
nephila/djangocms-blog | djangocms_blog/admin.py | PostAdmin.get_urls | def get_urls(self):
"""
Customize the modeladmin urls
"""
urls = [
url(r'^publish/([0-9]+)/$', self.admin_site.admin_view(self.publish_post),
name='djangocms_blog_publish_article'),
]
urls.extend(super(PostAdmin, self).get_urls())
return urls | python | def get_urls(self):
"""
Customize the modeladmin urls
"""
urls = [
url(r'^publish/([0-9]+)/$', self.admin_site.admin_view(self.publish_post),
name='djangocms_blog_publish_article'),
]
urls.extend(super(PostAdmin, self).get_urls())
return urls | [
"def",
"get_urls",
"(",
"self",
")",
":",
"urls",
"=",
"[",
"url",
"(",
"r'^publish/([0-9]+)/$'",
",",
"self",
".",
"admin_site",
".",
"admin_view",
"(",
"self",
".",
"publish_post",
")",
",",
"name",
"=",
"'djangocms_blog_publish_article'",
")",
",",
"]",
"urls",
".",
"extend",
"(",
"super",
"(",
"PostAdmin",
",",
"self",
")",
".",
"get_urls",
"(",
")",
")",
"return",
"urls"
] | Customize the modeladmin urls | [
"Customize",
"the",
"modeladmin",
"urls"
] | 3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d | https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/admin.py#L224-L233 | train |
nephila/djangocms-blog | djangocms_blog/admin.py | PostAdmin.publish_post | def publish_post(self, request, pk):
"""
Admin view to publish a single post
:param request: request
:param pk: primary key of the post to publish
:return: Redirect to the post itself (if found) or fallback urls
"""
language = get_language_from_request(request, check_path=True)
try:
post = Post.objects.get(pk=int(pk))
post.publish = True
post.save()
return HttpResponseRedirect(post.get_absolute_url(language))
except Exception:
try:
return HttpResponseRedirect(request.META['HTTP_REFERER'])
except KeyError:
return HttpResponseRedirect(reverse('djangocms_blog:posts-latest')) | python | def publish_post(self, request, pk):
"""
Admin view to publish a single post
:param request: request
:param pk: primary key of the post to publish
:return: Redirect to the post itself (if found) or fallback urls
"""
language = get_language_from_request(request, check_path=True)
try:
post = Post.objects.get(pk=int(pk))
post.publish = True
post.save()
return HttpResponseRedirect(post.get_absolute_url(language))
except Exception:
try:
return HttpResponseRedirect(request.META['HTTP_REFERER'])
except KeyError:
return HttpResponseRedirect(reverse('djangocms_blog:posts-latest')) | [
"def",
"publish_post",
"(",
"self",
",",
"request",
",",
"pk",
")",
":",
"language",
"=",
"get_language_from_request",
"(",
"request",
",",
"check_path",
"=",
"True",
")",
"try",
":",
"post",
"=",
"Post",
".",
"objects",
".",
"get",
"(",
"pk",
"=",
"int",
"(",
"pk",
")",
")",
"post",
".",
"publish",
"=",
"True",
"post",
".",
"save",
"(",
")",
"return",
"HttpResponseRedirect",
"(",
"post",
".",
"get_absolute_url",
"(",
"language",
")",
")",
"except",
"Exception",
":",
"try",
":",
"return",
"HttpResponseRedirect",
"(",
"request",
".",
"META",
"[",
"'HTTP_REFERER'",
"]",
")",
"except",
"KeyError",
":",
"return",
"HttpResponseRedirect",
"(",
"reverse",
"(",
"'djangocms_blog:posts-latest'",
")",
")"
] | Admin view to publish a single post
:param request: request
:param pk: primary key of the post to publish
:return: Redirect to the post itself (if found) or fallback urls | [
"Admin",
"view",
"to",
"publish",
"a",
"single",
"post"
] | 3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d | https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/admin.py#L247-L265 | train |
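The view above is registered under the name shown in get_urls(); the sketch below builds a link to it from elsewhere in a project that has this admin registered. The 'admin' namespace is the default AdminSite namespace and the pk value is illustrative.

from django.urls import reverse

def publish_url_for(post_pk):
    # name comes from get_urls() above; 'admin' is the default AdminSite namespace (assumed)
    return reverse('admin:djangocms_blog_publish_article', args=[post_pk])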
nephila/djangocms-blog | djangocms_blog/admin.py | PostAdmin.has_restricted_sites | def has_restricted_sites(self, request):
"""
Whether the current user has permission on one site only
:param request: current request
:return: boolean: user has permission on only one site
"""
sites = self.get_restricted_sites(request)
return sites and sites.count() == 1 | python | def has_restricted_sites(self, request):
"""
Whether the current user has permission on one site only
:param request: current request
:return: boolean: user has permission on only one site
"""
sites = self.get_restricted_sites(request)
return sites and sites.count() == 1 | [
"def",
"has_restricted_sites",
"(",
"self",
",",
"request",
")",
":",
"sites",
"=",
"self",
".",
"get_restricted_sites",
"(",
"request",
")",
"return",
"sites",
"and",
"sites",
".",
"count",
"(",
")",
"==",
"1"
] | Whether the current user has permission on one site only
:param request: current request
:return: boolean: user has permission on only one site | [
"Whether",
"the",
"current",
"user",
"has",
"permission",
"on",
"one",
"site",
"only"
] | 3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d | https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/admin.py#L267-L275 | train |
nephila/djangocms-blog | djangocms_blog/admin.py | PostAdmin.get_restricted_sites | def get_restricted_sites(self, request):
"""
The sites on which the user has permission on.
To return the permissions, the method checks for the ``get_sites``
method on the user instance (e.g.: ``return request.user.get_sites()``)
which must return the queryset of enabled sites.
If the attribute does not exist, the user is considered enabled
for all the websites.
:param request: current request
:return: boolean or a queryset of available sites
"""
try:
return request.user.get_sites()
except AttributeError: # pragma: no cover
return Site.objects.none() | python | def get_restricted_sites(self, request):
"""
The sites on which the user has permission on.
To return the permissions, the method checks for the ``get_sites``
method on the user instance (e.g.: ``return request.user.get_sites()``)
which must return the queryset of enabled sites.
If the attribute does not exist, the user is considered enabled
for all the websites.
:param request: current request
:return: boolean or a queryset of available sites
"""
try:
return request.user.get_sites()
except AttributeError: # pragma: no cover
return Site.objects.none() | [
"def",
"get_restricted_sites",
"(",
"self",
",",
"request",
")",
":",
"try",
":",
"return",
"request",
".",
"user",
".",
"get_sites",
"(",
")",
"except",
"AttributeError",
":",
"# pragma: no cover",
"return",
"Site",
".",
"objects",
".",
"none",
"(",
")"
] | The sites on which the user has permission on.
To return the permissions, the method checks for the ``get_sites``
method on the user instance (e.g.: ``return request.user.get_sites()``)
which must return the queryset of enabled sites.
If the attribute does not exist, the user is considered enabled
for all the websites.
:param request: current request
:return: boolean or a queryset of available sites | [
"The",
"sites",
"on",
"which",
"the",
"user",
"has",
"permission",
"on",
"."
] | 3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d | https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/admin.py#L277-L293 | train |
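The docstring above says the user model may expose a get_sites() hook; the sketch below is a hypothetical custom user model providing it (the model name and field are invented, and AUTH_USER_MODEL would have to point at it).

from django.contrib.auth.models import AbstractUser
from django.contrib.sites.models import Site
from django.db import models

class NewsroomUser(AbstractUser):
    # hypothetical model: only the get_sites() hook matters to PostAdmin
    sites = models.ManyToManyField(Site, blank=True)

    def get_sites(self):
        # get_restricted_sites() above expects a queryset of the sites this user may manage
        return self.sites.all()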
nephila/djangocms-blog | djangocms_blog/admin.py | PostAdmin.get_fieldsets | def get_fieldsets(self, request, obj=None):
"""
Customize the fieldsets according to the app settings
:param request: request
:param obj: post
:return: fieldsets configuration
"""
app_config_default = self._app_config_select(request, obj)
if app_config_default is None and request.method == 'GET':
return super(PostAdmin, self).get_fieldsets(request, obj)
if not obj:
config = app_config_default
else:
config = obj.app_config
fsets = deepcopy(self._fieldsets)
if config:
abstract = bool(config.use_abstract)
placeholder = bool(config.use_placeholder)
related = bool(config.use_related)
else:
abstract = get_setting('USE_ABSTRACT')
placeholder = get_setting('USE_PLACEHOLDER')
related = get_setting('USE_RELATED')
if abstract:
fsets[0][1]['fields'].append('abstract')
if not placeholder:
fsets[0][1]['fields'].append('post_text')
if get_setting('MULTISITE') and not self.has_restricted_sites(request):
fsets[1][1]['fields'][0].append('sites')
if request.user.is_superuser:
fsets[1][1]['fields'][0].append('author')
if apps.is_installed('djangocms_blog.liveblog'):
fsets[2][1]['fields'][2].append('enable_liveblog')
filter_function = get_setting('ADMIN_POST_FIELDSET_FILTER')
if related and Post.objects.namespace(config.namespace).active_translations().exists():
fsets[1][1]['fields'][0].append('related')
if callable(filter_function):
fsets = filter_function(fsets, request, obj=obj)
return fsets | python | def get_fieldsets(self, request, obj=None):
"""
Customize the fieldsets according to the app settings
:param request: request
:param obj: post
:return: fieldsets configuration
"""
app_config_default = self._app_config_select(request, obj)
if app_config_default is None and request.method == 'GET':
return super(PostAdmin, self).get_fieldsets(request, obj)
if not obj:
config = app_config_default
else:
config = obj.app_config
fsets = deepcopy(self._fieldsets)
if config:
abstract = bool(config.use_abstract)
placeholder = bool(config.use_placeholder)
related = bool(config.use_related)
else:
abstract = get_setting('USE_ABSTRACT')
placeholder = get_setting('USE_PLACEHOLDER')
related = get_setting('USE_RELATED')
if abstract:
fsets[0][1]['fields'].append('abstract')
if not placeholder:
fsets[0][1]['fields'].append('post_text')
if get_setting('MULTISITE') and not self.has_restricted_sites(request):
fsets[1][1]['fields'][0].append('sites')
if request.user.is_superuser:
fsets[1][1]['fields'][0].append('author')
if apps.is_installed('djangocms_blog.liveblog'):
fsets[2][1]['fields'][2].append('enable_liveblog')
filter_function = get_setting('ADMIN_POST_FIELDSET_FILTER')
if related and Post.objects.namespace(config.namespace).active_translations().exists():
fsets[1][1]['fields'][0].append('related')
if callable(filter_function):
fsets = filter_function(fsets, request, obj=obj)
return fsets | [
"def",
"get_fieldsets",
"(",
"self",
",",
"request",
",",
"obj",
"=",
"None",
")",
":",
"app_config_default",
"=",
"self",
".",
"_app_config_select",
"(",
"request",
",",
"obj",
")",
"if",
"app_config_default",
"is",
"None",
"and",
"request",
".",
"method",
"==",
"'GET'",
":",
"return",
"super",
"(",
"PostAdmin",
",",
"self",
")",
".",
"get_fieldsets",
"(",
"request",
",",
"obj",
")",
"if",
"not",
"obj",
":",
"config",
"=",
"app_config_default",
"else",
":",
"config",
"=",
"obj",
".",
"app_config",
"fsets",
"=",
"deepcopy",
"(",
"self",
".",
"_fieldsets",
")",
"if",
"config",
":",
"abstract",
"=",
"bool",
"(",
"config",
".",
"use_abstract",
")",
"placeholder",
"=",
"bool",
"(",
"config",
".",
"use_placeholder",
")",
"related",
"=",
"bool",
"(",
"config",
".",
"use_related",
")",
"else",
":",
"abstract",
"=",
"get_setting",
"(",
"'USE_ABSTRACT'",
")",
"placeholder",
"=",
"get_setting",
"(",
"'USE_PLACEHOLDER'",
")",
"related",
"=",
"get_setting",
"(",
"'USE_RELATED'",
")",
"if",
"abstract",
":",
"fsets",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"'fields'",
"]",
".",
"append",
"(",
"'abstract'",
")",
"if",
"not",
"placeholder",
":",
"fsets",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"'fields'",
"]",
".",
"append",
"(",
"'post_text'",
")",
"if",
"get_setting",
"(",
"'MULTISITE'",
")",
"and",
"not",
"self",
".",
"has_restricted_sites",
"(",
"request",
")",
":",
"fsets",
"[",
"1",
"]",
"[",
"1",
"]",
"[",
"'fields'",
"]",
"[",
"0",
"]",
".",
"append",
"(",
"'sites'",
")",
"if",
"request",
".",
"user",
".",
"is_superuser",
":",
"fsets",
"[",
"1",
"]",
"[",
"1",
"]",
"[",
"'fields'",
"]",
"[",
"0",
"]",
".",
"append",
"(",
"'author'",
")",
"if",
"apps",
".",
"is_installed",
"(",
"'djangocms_blog.liveblog'",
")",
":",
"fsets",
"[",
"2",
"]",
"[",
"1",
"]",
"[",
"'fields'",
"]",
"[",
"2",
"]",
".",
"append",
"(",
"'enable_liveblog'",
")",
"filter_function",
"=",
"get_setting",
"(",
"'ADMIN_POST_FIELDSET_FILTER'",
")",
"if",
"related",
"and",
"Post",
".",
"objects",
".",
"namespace",
"(",
"config",
".",
"namespace",
")",
".",
"active_translations",
"(",
")",
".",
"exists",
"(",
")",
":",
"fsets",
"[",
"1",
"]",
"[",
"1",
"]",
"[",
"'fields'",
"]",
"[",
"0",
"]",
".",
"append",
"(",
"'related'",
")",
"if",
"callable",
"(",
"filter_function",
")",
":",
"fsets",
"=",
"filter_function",
"(",
"fsets",
",",
"request",
",",
"obj",
"=",
"obj",
")",
"return",
"fsets"
] | Customize the fieldsets according to the app settings
:param request: request
:param obj: post
:return: fieldsets configuration | [
"Customize",
"the",
"fieldsets",
"according",
"to",
"the",
"app",
"settings"
] | 3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d | https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/admin.py#L302-L342 | train |
nephila/djangocms-blog | djangocms_blog/admin.py | BlogConfigAdmin.save_model | def save_model(self, request, obj, form, change):
"""
Clear menu cache when changing menu structure
"""
if 'config.menu_structure' in form.changed_data:
from menus.menu_pool import menu_pool
menu_pool.clear(all=True)
return super(BlogConfigAdmin, self).save_model(request, obj, form, change) | python | def save_model(self, request, obj, form, change):
"""
Clear menu cache when changing menu structure
"""
if 'config.menu_structure' in form.changed_data:
from menus.menu_pool import menu_pool
menu_pool.clear(all=True)
return super(BlogConfigAdmin, self).save_model(request, obj, form, change) | [
"def",
"save_model",
"(",
"self",
",",
"request",
",",
"obj",
",",
"form",
",",
"change",
")",
":",
"if",
"'config.menu_structure'",
"in",
"form",
".",
"changed_data",
":",
"from",
"menus",
".",
"menu_pool",
"import",
"menu_pool",
"menu_pool",
".",
"clear",
"(",
"all",
"=",
"True",
")",
"return",
"super",
"(",
"BlogConfigAdmin",
",",
"self",
")",
".",
"save_model",
"(",
"request",
",",
"obj",
",",
"form",
",",
"change",
")"
] | Clear menu cache when changing menu structure | [
"Clear",
"menu",
"cache",
"when",
"changing",
"menu",
"structure"
] | 3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d | https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/admin.py#L453-L460 | train |
nephila/djangocms-blog | djangocms_blog/cms_wizards.py | PostWizardForm.clean_slug | def clean_slug(self):
"""
Generate a valid slug, in case the given one is taken
"""
source = self.cleaned_data.get('slug', '')
lang_choice = self.language_code
if not source:
source = slugify(self.cleaned_data.get('title', ''))
qs = Post._default_manager.active_translations(lang_choice).language(lang_choice)
used = list(qs.values_list('translations__slug', flat=True))
slug = source
i = 1
while slug in used:
slug = '%s-%s' % (source, i)
i += 1
return slug | python | def clean_slug(self):
"""
Generate a valid slug, in case the given one is taken
"""
source = self.cleaned_data.get('slug', '')
lang_choice = self.language_code
if not source:
source = slugify(self.cleaned_data.get('title', ''))
qs = Post._default_manager.active_translations(lang_choice).language(lang_choice)
used = list(qs.values_list('translations__slug', flat=True))
slug = source
i = 1
while slug in used:
slug = '%s-%s' % (source, i)
i += 1
return slug | [
"def",
"clean_slug",
"(",
"self",
")",
":",
"source",
"=",
"self",
".",
"cleaned_data",
".",
"get",
"(",
"'slug'",
",",
"''",
")",
"lang_choice",
"=",
"self",
".",
"language_code",
"if",
"not",
"source",
":",
"source",
"=",
"slugify",
"(",
"self",
".",
"cleaned_data",
".",
"get",
"(",
"'title'",
",",
"''",
")",
")",
"qs",
"=",
"Post",
".",
"_default_manager",
".",
"active_translations",
"(",
"lang_choice",
")",
".",
"language",
"(",
"lang_choice",
")",
"used",
"=",
"list",
"(",
"qs",
".",
"values_list",
"(",
"'translations__slug'",
",",
"flat",
"=",
"True",
")",
")",
"slug",
"=",
"source",
"i",
"=",
"1",
"while",
"slug",
"in",
"used",
":",
"slug",
"=",
"'%s-%s'",
"%",
"(",
"source",
",",
"i",
")",
"i",
"+=",
"1",
"return",
"slug"
] | Generate a valid slug, in case the given one is taken | [
"Generate",
"a",
"valid",
"slug",
"in",
"case",
"the",
"given",
"one",
"is",
"taken"
] | 3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d | https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/cms_wizards.py#L55-L70 | train |
nephila/djangocms-blog | djangocms_blog/managers.py | TaggedFilterItem.tagged | def tagged(self, other_model=None, queryset=None):
"""
Returns a queryset of tagged items of this model,
or items with the same tags as a given model or queryset
"""
tags = self._taglist(other_model, queryset)
return self.get_queryset().filter(tags__in=tags).distinct() | python | def tagged(self, other_model=None, queryset=None):
"""
Returns a queryset of tagged items of this model,
or items with the same tags as a given model or queryset
"""
tags = self._taglist(other_model, queryset)
return self.get_queryset().filter(tags__in=tags).distinct() | [
"def",
"tagged",
"(",
"self",
",",
"other_model",
"=",
"None",
",",
"queryset",
"=",
"None",
")",
":",
"tags",
"=",
"self",
".",
"_taglist",
"(",
"other_model",
",",
"queryset",
")",
"return",
"self",
".",
"get_queryset",
"(",
")",
".",
"filter",
"(",
"tags__in",
"=",
"tags",
")",
".",
"distinct",
"(",
")"
] | Returns a queryset of tagged items of this model,
or items with the same tags as a given model or queryset | [
"Returns",
"a",
"queryset",
"of",
"tagged",
"items",
"of",
"this",
"model",
"or",
"items",
"with",
"the",
"same",
"tags",
"as",
"a",
"given",
"model",
"or",
"queryset"
] | 3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d | https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/managers.py#L16-L22 | train |
nephila/djangocms-blog | djangocms_blog/managers.py | TaggedFilterItem._taglist | def _taglist(self, other_model=None, queryset=None):
"""
Returns a list of tag ids common to the current model and the model
or queryset passed as argument
"""
from taggit.models import TaggedItem
filter = None
if queryset is not None:
filter = set()
for item in queryset.all():
filter.update(item.tags.all())
filter = set([tag.id for tag in filter])
elif other_model is not None:
filter = set(TaggedItem.objects.filter(
content_type__model=other_model.__name__.lower()
).values_list('tag_id', flat=True))
tags = set(TaggedItem.objects.filter(
content_type__model=self.model.__name__.lower()
).values_list('tag_id', flat=True))
if filter is not None:
tags = tags.intersection(filter)
return list(tags) | python | def _taglist(self, other_model=None, queryset=None):
"""
Returns a list of tag ids common to the current model and the model
or queryset passed as argument
"""
from taggit.models import TaggedItem
filter = None
if queryset is not None:
filter = set()
for item in queryset.all():
filter.update(item.tags.all())
filter = set([tag.id for tag in filter])
elif other_model is not None:
filter = set(TaggedItem.objects.filter(
content_type__model=other_model.__name__.lower()
).values_list('tag_id', flat=True))
tags = set(TaggedItem.objects.filter(
content_type__model=self.model.__name__.lower()
).values_list('tag_id', flat=True))
if filter is not None:
tags = tags.intersection(filter)
return list(tags) | [
"def",
"_taglist",
"(",
"self",
",",
"other_model",
"=",
"None",
",",
"queryset",
"=",
"None",
")",
":",
"from",
"taggit",
".",
"models",
"import",
"TaggedItem",
"filter",
"=",
"None",
"if",
"queryset",
"is",
"not",
"None",
":",
"filter",
"=",
"set",
"(",
")",
"for",
"item",
"in",
"queryset",
".",
"all",
"(",
")",
":",
"filter",
".",
"update",
"(",
"item",
".",
"tags",
".",
"all",
"(",
")",
")",
"filter",
"=",
"set",
"(",
"[",
"tag",
".",
"id",
"for",
"tag",
"in",
"filter",
"]",
")",
"elif",
"other_model",
"is",
"not",
"None",
":",
"filter",
"=",
"set",
"(",
"TaggedItem",
".",
"objects",
".",
"filter",
"(",
"content_type__model",
"=",
"other_model",
".",
"__name__",
".",
"lower",
"(",
")",
")",
".",
"values_list",
"(",
"'tag_id'",
",",
"flat",
"=",
"True",
")",
")",
"tags",
"=",
"set",
"(",
"TaggedItem",
".",
"objects",
".",
"filter",
"(",
"content_type__model",
"=",
"self",
".",
"model",
".",
"__name__",
".",
"lower",
"(",
")",
")",
".",
"values_list",
"(",
"'tag_id'",
",",
"flat",
"=",
"True",
")",
")",
"if",
"filter",
"is",
"not",
"None",
":",
"tags",
"=",
"tags",
".",
"intersection",
"(",
"filter",
")",
"return",
"list",
"(",
"tags",
")"
] | Returns a list of tag ids common to the current model and the model
or queryset passed as argument | [
"Returns",
"a",
"list",
"of",
"tag",
"ids",
"common",
"to",
"the",
"current",
"model",
"and",
"the",
"model",
"or",
"queryset",
"passed",
"as",
"argument"
] | 3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d | https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/managers.py#L24-L45 | train |
nephila/djangocms-blog | djangocms_blog/managers.py | TaggedFilterItem.tag_list | def tag_list(self, other_model=None, queryset=None):
"""
Returns a queryset of tags common to the current model and
the model or queryset passed as argument
"""
from taggit.models import Tag
return Tag.objects.filter(id__in=self._taglist(other_model, queryset)) | python | def tag_list(self, other_model=None, queryset=None):
"""
Returns a queryset of tags common to the current model and
the model or queryset passed as argument
"""
from taggit.models import Tag
return Tag.objects.filter(id__in=self._taglist(other_model, queryset)) | [
"def",
"tag_list",
"(",
"self",
",",
"other_model",
"=",
"None",
",",
"queryset",
"=",
"None",
")",
":",
"from",
"taggit",
".",
"models",
"import",
"Tag",
"return",
"Tag",
".",
"objects",
".",
"filter",
"(",
"id__in",
"=",
"self",
".",
"_taglist",
"(",
"other_model",
",",
"queryset",
")",
")"
] | Returns a queryset of tags common to the current model and
the model or queryset passed as argument | [
"Returns",
"a",
"queryset",
"of",
"tags",
"common",
"to",
"the",
"current",
"model",
"and",
"the",
"model",
"or",
"queryset",
"passed",
"as",
"argument"
] | 3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d | https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/managers.py#L47-L53 | train |
nephila/djangocms-blog | djangocms_blog/liveblog/consumers.py | liveblog_connect | def liveblog_connect(message, apphook, lang, post):
"""
Connect users to the group of the given post according to the given language
Return with an error message if a post cannot be found
:param message: channel connect message
:param apphook: apphook config namespace
:param lang: language
:param post: post slug
"""
try:
post = Post.objects.namespace(apphook).language(lang).active_translations(slug=post).get()
except Post.DoesNotExist:
message.reply_channel.send({
'text': json.dumps({'error': 'no_post'}),
})
return
Group(post.liveblog_group).add(message.reply_channel)
message.reply_channel.send({"accept": True}) | python | def liveblog_connect(message, apphook, lang, post):
"""
Connect users to the group of the given post according to the given language
Return with an error message if a post cannot be found
:param message: channel connect message
:param apphook: apphook config namespace
:param lang: language
:param post: post slug
"""
try:
post = Post.objects.namespace(apphook).language(lang).active_translations(slug=post).get()
except Post.DoesNotExist:
message.reply_channel.send({
'text': json.dumps({'error': 'no_post'}),
})
return
Group(post.liveblog_group).add(message.reply_channel)
message.reply_channel.send({"accept": True}) | [
"def",
"liveblog_connect",
"(",
"message",
",",
"apphook",
",",
"lang",
",",
"post",
")",
":",
"try",
":",
"post",
"=",
"Post",
".",
"objects",
".",
"namespace",
"(",
"apphook",
")",
".",
"language",
"(",
"lang",
")",
".",
"active_translations",
"(",
"slug",
"=",
"post",
")",
".",
"get",
"(",
")",
"except",
"Post",
".",
"DoesNotExist",
":",
"message",
".",
"reply_channel",
".",
"send",
"(",
"{",
"'text'",
":",
"json",
".",
"dumps",
"(",
"{",
"'error'",
":",
"'no_post'",
"}",
")",
",",
"}",
")",
"return",
"Group",
"(",
"post",
".",
"liveblog_group",
")",
".",
"add",
"(",
"message",
".",
"reply_channel",
")",
"message",
".",
"reply_channel",
".",
"send",
"(",
"{",
"\"accept\"",
":",
"True",
"}",
")"
] | Connect users to the group of the given post according to the given language
Return with an error message if a post cannot be found
:param message: channel connect message
:param apphook: apphook config namespace
:param lang: language
:param post: post slug | [
"Connect",
"users",
"to",
"the",
"group",
"of",
"the",
"given",
"post",
"according",
"to",
"the",
"given",
"language"
] | 3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d | https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/liveblog/consumers.py#L11-L30 | train |
nephila/djangocms-blog | djangocms_blog/liveblog/consumers.py | liveblog_disconnect | def liveblog_disconnect(message, apphook, lang, post):
"""
Disconnect users from the group of the given post according to the given language
Return with an error message if a post cannot be found
:param message: channel connect message
:param apphook: apphook config namespace
:param lang: language
:param post: post slug
"""
try:
post = Post.objects.namespace(apphook).language(lang).active_translations(slug=post).get()
except Post.DoesNotExist:
message.reply_channel.send({
'text': json.dumps({'error': 'no_post'}),
})
return
Group(post.liveblog_group).discard(message.reply_channel) | python | def liveblog_disconnect(message, apphook, lang, post):
"""
Disconnect users from the group of the given post according to the given language
Return with an error message if a post cannot be found
:param message: channel connect message
:param apphook: apphook config namespace
:param lang: language
:param post: post slug
"""
try:
post = Post.objects.namespace(apphook).language(lang).active_translations(slug=post).get()
except Post.DoesNotExist:
message.reply_channel.send({
'text': json.dumps({'error': 'no_post'}),
})
return
Group(post.liveblog_group).discard(message.reply_channel) | [
"def",
"liveblog_disconnect",
"(",
"message",
",",
"apphook",
",",
"lang",
",",
"post",
")",
":",
"try",
":",
"post",
"=",
"Post",
".",
"objects",
".",
"namespace",
"(",
"apphook",
")",
".",
"language",
"(",
"lang",
")",
".",
"active_translations",
"(",
"slug",
"=",
"post",
")",
".",
"get",
"(",
")",
"except",
"Post",
".",
"DoesNotExist",
":",
"message",
".",
"reply_channel",
".",
"send",
"(",
"{",
"'text'",
":",
"json",
".",
"dumps",
"(",
"{",
"'error'",
":",
"'no_post'",
"}",
")",
",",
"}",
")",
"return",
"Group",
"(",
"post",
".",
"liveblog_group",
")",
".",
"discard",
"(",
"message",
".",
"reply_channel",
")"
] | Disconnect users from the group of the given post according to the given language
Return with an error message if a post cannot be found
:param message: channel connect message
:param apphook: apphook config namespace
:param lang: language
:param post: post slug | [
"Disconnect",
"users",
"to",
"the",
"group",
"of",
"the",
"given",
"post",
"according",
"to",
"the",
"given",
"language"
] | 3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d | https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/liveblog/consumers.py#L33-L51 | train |
tchellomello/python-amcrest | src/amcrest/video.py | Video.video_in_option | def video_in_option(self, param, profile='Day'):
"""
Return video input option.
Params:
param - parameter, such as 'DayNightColor'
profile - 'Day', 'Night' or 'Normal'
"""
if profile == 'Day':
field = param
else:
field = '{}Options.{}'.format(profile, param)
return utils.pretty(
[opt for opt in self.video_in_options.split()
if '].{}='.format(field) in opt][0]) | python | def video_in_option(self, param, profile='Day'):
"""
Return video input option.
Params:
param - parameter, such as 'DayNightColor'
profile - 'Day', 'Night' or 'Normal'
"""
if profile == 'Day':
field = param
else:
field = '{}Options.{}'.format(profile, param)
return utils.pretty(
[opt for opt in self.video_in_options.split()
if '].{}='.format(field) in opt][0]) | [
"def",
"video_in_option",
"(",
"self",
",",
"param",
",",
"profile",
"=",
"'Day'",
")",
":",
"if",
"profile",
"==",
"'Day'",
":",
"field",
"=",
"param",
"else",
":",
"field",
"=",
"'{}Options.{}'",
".",
"format",
"(",
"profile",
",",
"param",
")",
"return",
"utils",
".",
"pretty",
"(",
"[",
"opt",
"for",
"opt",
"in",
"self",
".",
"video_in_options",
".",
"split",
"(",
")",
"if",
"'].{}='",
".",
"format",
"(",
"field",
")",
"in",
"opt",
"]",
"[",
"0",
"]",
")"
] | Return video input option.
Params:
param - parameter, such as 'DayNightColor'
profile - 'Day', 'Night' or 'Normal' | [
"Return",
"video",
"input",
"option",
"."
] | ed842139e234de2eaf6ee8fb480214711cde1249 | https://github.com/tchellomello/python-amcrest/blob/ed842139e234de2eaf6ee8fb480214711cde1249/src/amcrest/video.py#L132-L146 | train |
tchellomello/python-amcrest | src/amcrest/http.py | Http._generate_token | def _generate_token(self):
"""Create authentication to use with requests."""
session = self.get_session()
url = self.__base_url('magicBox.cgi?action=getMachineName')
try:
# try old basic method
auth = requests.auth.HTTPBasicAuth(self._user, self._password)
req = session.get(url, auth=auth, timeout=self._timeout_default)
if not req.ok:
# try new digest method
auth = requests.auth.HTTPDigestAuth(
self._user, self._password)
req = session.get(
url, auth=auth, timeout=self._timeout_default)
req.raise_for_status()
except requests.RequestException as error:
_LOGGER.error(error)
raise CommError('Could not communicate with camera')
# check if user passed
result = req.text.lower()
if 'invalid' in result or 'error' in result:
_LOGGER.error('Result from camera: %s',
req.text.strip().replace('\r\n', ': '))
raise LoginError('Invalid credentials')
return auth | python | def _generate_token(self):
"""Create authentication to use with requests."""
session = self.get_session()
url = self.__base_url('magicBox.cgi?action=getMachineName')
try:
# try old basic method
auth = requests.auth.HTTPBasicAuth(self._user, self._password)
req = session.get(url, auth=auth, timeout=self._timeout_default)
if not req.ok:
# try new digest method
auth = requests.auth.HTTPDigestAuth(
self._user, self._password)
req = session.get(
url, auth=auth, timeout=self._timeout_default)
req.raise_for_status()
except requests.RequestException as error:
_LOGGER.error(error)
raise CommError('Could not communicate with camera')
# check if user passed
result = req.text.lower()
if 'invalid' in result or 'error' in result:
_LOGGER.error('Result from camera: %s',
req.text.strip().replace('\r\n', ': '))
raise LoginError('Invalid credentials')
return auth | [
"def",
"_generate_token",
"(",
"self",
")",
":",
"session",
"=",
"self",
".",
"get_session",
"(",
")",
"url",
"=",
"self",
".",
"__base_url",
"(",
"'magicBox.cgi?action=getMachineName'",
")",
"try",
":",
"# try old basic method",
"auth",
"=",
"requests",
".",
"auth",
".",
"HTTPBasicAuth",
"(",
"self",
".",
"_user",
",",
"self",
".",
"_password",
")",
"req",
"=",
"session",
".",
"get",
"(",
"url",
",",
"auth",
"=",
"auth",
",",
"timeout",
"=",
"self",
".",
"_timeout_default",
")",
"if",
"not",
"req",
".",
"ok",
":",
"# try new digest method",
"auth",
"=",
"requests",
".",
"auth",
".",
"HTTPDigestAuth",
"(",
"self",
".",
"_user",
",",
"self",
".",
"_password",
")",
"req",
"=",
"session",
".",
"get",
"(",
"url",
",",
"auth",
"=",
"auth",
",",
"timeout",
"=",
"self",
".",
"_timeout_default",
")",
"req",
".",
"raise_for_status",
"(",
")",
"except",
"requests",
".",
"RequestException",
"as",
"error",
":",
"_LOGGER",
".",
"error",
"(",
"error",
")",
"raise",
"CommError",
"(",
"'Could not communicate with camera'",
")",
"# check if user passed",
"result",
"=",
"req",
".",
"text",
".",
"lower",
"(",
")",
"if",
"'invalid'",
"in",
"result",
"or",
"'error'",
"in",
"result",
":",
"_LOGGER",
".",
"error",
"(",
"'Result from camera: %s'",
",",
"req",
".",
"text",
".",
"strip",
"(",
")",
".",
"replace",
"(",
"'\\r\\n'",
",",
"': '",
")",
")",
"raise",
"LoginError",
"(",
"'Invalid credentials'",
")",
"return",
"auth"
] | Create authentication to use with requests. | [
"Create",
"authentication",
"to",
"use",
"with",
"requests",
"."
] | ed842139e234de2eaf6ee8fb480214711cde1249 | https://github.com/tchellomello/python-amcrest/blob/ed842139e234de2eaf6ee8fb480214711cde1249/src/amcrest/http.py#L73-L99 | train |