Code
stringlengths 103
85.9k
| Summary
listlengths 0
94
|
---|---|
def handle_g_error(error, return_value):
    """Convert a ``GError**`` into a Python ``ImageLoadingError`` and raise it.

    :param error: A cffi ``GError**`` pointer.
    :param return_value: The gdk-pixbuf call's return value (truthy on success).
    :raises ImageLoadingError: whenever the GError pointer is non-NULL.
    """
    gerror = error[0]
    # A NULL GError must coincide with a truthy return value, and vice versa.
    assert bool(return_value) == (gerror == ffi.NULL)
    if gerror == ffi.NULL:
        return
    if gerror.message == ffi.NULL:  # pragma: no cover
        message = 'Pixbuf error'
    else:
        message = ('Pixbuf error: ' +
                   ffi.string(gerror.message).decode('utf8', 'replace'))
    glib.g_error_free(gerror)
    raise ImageLoadingError(message)
|
[
"Convert a :c:type:`GError**` to a Python :exception:`ImageLoadingError`,\n and raise it.\n\n "
] |
def decode_to_pixbuf(image_data, width=None, height=None):
    """Decode an image from memory with GDK-PixBuf.

    The file format is detected automatically.

    :param image_data: A byte string.
    :param width: Integer width in pixels or None.
    :param height: Integer height in pixels or None.
    :returns: A ``(pixbuf, format_name)`` tuple.
    :raises ImageLoadingError: if the data is invalid or unsupported.
    """
    loader = ffi.gc(
        gdk_pixbuf.gdk_pixbuf_loader_new(), gobject.g_object_unref)
    error = ffi.new('GError **')
    if width and height:
        gdk_pixbuf.gdk_pixbuf_loader_set_size(loader, width, height)
    written = gdk_pixbuf.gdk_pixbuf_loader_write(
        loader, ffi.new('guchar[]', image_data), len(image_data), error)
    handle_g_error(error, written)
    handle_g_error(error, gdk_pixbuf.gdk_pixbuf_loader_close(loader, error))
    format_ = gdk_pixbuf.gdk_pixbuf_loader_get_format(loader)
    if format_ == ffi.NULL:
        format_name = None
    else:
        format_name = ffi.string(
            gdk_pixbuf.gdk_pixbuf_format_get_name(format_)).decode('ascii')
    pixbuf = gdk_pixbuf.gdk_pixbuf_loader_get_pixbuf(loader)
    if pixbuf == ffi.NULL:  # pragma: no cover
        raise ImageLoadingError('Not enough image data (got a NULL pixbuf.)')
    return Pixbuf(pixbuf), format_name
|
[
"Decode an image from memory with GDK-PixBuf.\n The file format is detected automatically.\n\n :param image_data: A byte string\n :param width: Integer width in pixels or None\n :param height: Integer height in pixels or None\n :returns:\n A tuple of a new :class:`PixBuf` object\n and the name of the detected image format.\n :raises:\n :exc:`ImageLoadingError` if the image data is invalid\n or in an unsupported format.\n\n "
] |
def decode_to_image_surface(image_data, width=None, height=None):
    """Decode an image from memory into a cairo surface.

    :param image_data: A byte string.
    :param width: Integer width in pixels or None.
    :param height: Integer height in pixels or None.
    :returns: A ``(surface, format_name)`` tuple.
    :raises ImageLoadingError: if the data is invalid or unsupported.
    """
    pixbuf, format_name = decode_to_pixbuf(image_data, width, height)
    # Prefer the fast GDK path; otherwise pick a fallback depending on
    # whether the pixbuf carries an alpha channel.
    if gdk is not None:
        surface = pixbuf_to_cairo_gdk(pixbuf)
    elif pixbuf.get_has_alpha():
        surface = pixbuf_to_cairo_png(pixbuf)
    else:
        surface = pixbuf_to_cairo_slices(pixbuf)
    return surface, format_name
|
[
"Decode an image from memory into a cairo surface.\n The file format is detected automatically.\n\n :param image_data: A byte string\n :param width: Integer width in pixels or None\n :param height: Integer height in pixels or None\n :returns:\n A tuple of a new :class:`~cairocffi.ImageSurface` object\n and the name of the detected image format.\n :raises:\n :exc:`ImageLoadingError` if the image data is invalid\n or in an unsupported format.\n\n "
] |
def pixbuf_to_cairo_gdk(pixbuf):
    """Convert a PixBuf to an ImageSurface through GDK.

    This is the fastest method, but GDK is not always available.
    """
    # Paint the pixbuf as the source of a throw-away 1x1 context, then
    # pull the surface back out of the resulting source pattern.
    context = Context(ImageSurface(constants.FORMAT_ARGB32, 1, 1))
    gdk.gdk_cairo_set_source_pixbuf(context._pointer, pixbuf._pointer, 0, 0)
    return context.get_source().get_surface()
|
[
"Convert from PixBuf to ImageSurface, using GDK.\n\n This method is fastest but GDK is not always available.\n\n "
] |
def pixbuf_to_cairo_slices(pixbuf):
    """Convert a PixBuf to an ImageSurface using slice-based byte swapping.

    Roughly 2~5x slower than the GDK path but has no GDK dependency.
    Does not support an alpha channel (cairo uses pre-multiplied alpha,
    Pixbuf does not).
    """
    assert pixbuf.get_colorspace() == gdk_pixbuf.GDK_COLORSPACE_RGB
    assert pixbuf.get_n_channels() == 3
    assert pixbuf.get_bits_per_sample() == 8
    width = pixbuf.get_width()
    height = pixbuf.get_height()
    rowstride = pixbuf.get_rowstride()
    pixels = ffi.buffer(pixbuf.get_pixels(), pixbuf.get_byte_length())
    # TODO: remove this when cffi buffers support slicing with a stride.
    pixels = pixels[:]
    cairo_stride = ImageSurface.format_stride_for_width(
        constants.FORMAT_RGB24, width)
    data = bytearray(cairo_stride * height)
    big_endian = sys.byteorder == 'big'
    pixbuf_row_length = width * 3  # stride == row_length + padding
    cairo_row_length = width * 4  # stride == row_length + padding
    alpha = b'\xff' * width  # opaque
    for y in range(height):
        src = rowstride * y
        src_end = src + pixbuf_row_length
        red = pixels[src:src_end:3]
        green = pixels[src + 1:src_end:3]
        blue = pixels[src + 2:src_end:3]
        dst = cairo_stride * y
        dst_end = dst + cairo_row_length
        # Interleave channels in cairo's native-endian ARGB byte order.
        if big_endian:  # pragma: no cover
            channels = (alpha, red, green, blue)
        else:
            channels = (blue, green, red, alpha)
        for index, channel in enumerate(channels):
            data[dst + index:dst_end:4] = channel
    return ImageSurface(constants.FORMAT_RGB24,
                        width, height, array('B', data), cairo_stride)
|
[
"Convert from PixBuf to ImageSurface, using slice-based byte swapping.\n\n This method is 2~5x slower than GDK but does not support an alpha channel.\n (cairo uses pre-multiplied alpha, but not Pixbuf.)\n\n "
] |
def pixbuf_to_cairo_png(pixbuf):
    """Convert a PixBuf to an ImageSurface by round-tripping through PNG.

    Slowest method (10~30x slower than GDK) but always works.
    """
    buffer_pointer = ffi.new('gchar **')
    buffer_size = ffi.new('gsize *')
    error = ffi.new('GError **')
    # Uncompressed PNG: only used as a lossless in-memory carrier.
    saved = pixbuf.save_to_buffer(
        buffer_pointer, buffer_size, ffi.new('char[]', b'png'), error,
        ffi.new('char[]', b'compression'), ffi.new('char[]', b'0'),
        ffi.NULL)
    handle_g_error(error, saved)
    png_bytes = ffi.buffer(buffer_pointer[0], buffer_size[0])
    return ImageSurface.create_from_png(BytesIO(png_bytes))
|
[
"Convert from PixBuf to ImageSurface, by going through the PNG format.\n\n This method is 10~30x slower than GDK but always works.\n\n "
] |
def init_rotate(cls, radians):
    """Return a new :class:`Matrix` that rotates by *radians*.

    :param radians: Angle of rotation, in radians. With cairo's default
        axis orientation, positive angles rotate clockwise.
    :type radians: float
    """
    matrix = cls()
    cairo.cairo_matrix_init_rotate(matrix._pointer, radians)
    return matrix
|
[
"Return a new :class:`Matrix` for a transformation\n that rotates by :obj:`radians`.\n\n :type radians: float\n :param radians:\n Angle of rotation, in radians.\n The direction of rotation is defined such that\n positive angles rotate in the direction\n from the positive X axis toward the positive Y axis.\n With the default axis orientation of cairo,\n positive angles rotate in a clockwise direction.\n\n "
] |
def as_tuple(self):
    """Return all matrix components as a ``(xx, yx, xy, yy, x0, y0)`` tuple."""
    m = self._pointer
    return (m.xx, m.yx, m.xy, m.yy, m.x0, m.y0)
|
[
"Return all of the matrix’s components.\n\n :returns: A ``(xx, yx, xy, yy, x0, y0)`` tuple of floats.\n\n "
] |
def multiply(self, other):
    """Return ``self * other`` as a new :class:`Matrix` object."""
    product = Matrix()
    cairo.cairo_matrix_multiply(
        product._pointer, self._pointer, other._pointer)
    return product
|
[
"Multiply with another matrix\n and return the result as a new :class:`Matrix` object.\n Same as ``self * other``.\n\n "
] |
def translate(self, tx, ty):
    """Prepend a translation by ``(tx, ty)`` to this matrix, in place.

    :param tx: Amount to translate in the X direction.
    :param ty: Amount to translate in the Y direction.
    :type tx: float
    :type ty: float
    """
    cairo.cairo_matrix_translate(self._pointer, tx, ty)
|
[
"Applies a translation by :obj:`tx`, :obj:`ty`\n to the transformation in this matrix.\n\n The effect of the new transformation is to\n first translate the coordinates by :obj:`tx` and :obj:`ty`,\n then apply the original transformation to the coordinates.\n\n .. note::\n This changes the matrix in-place.\n\n :param tx: Amount to translate in the X direction.\n :param ty: Amount to translate in the Y direction.\n :type tx: float\n :type ty: float\n\n "
] |
def scale(self, sx, sy=None):
    """Prepend scaling by ``(sx, sy)`` to this matrix, in place.

    If *sy* is omitted it defaults to *sx*, preserving aspect ratios.

    :param sx: Scale factor in the X direction.
    :param sy: Scale factor in the Y direction, or None for uniform scaling.
    """
    sy = sx if sy is None else sy
    cairo.cairo_matrix_scale(self._pointer, sx, sy)
|
[
"Applies scaling by :obj:`sx`, :obj:`sy`\n to the transformation in this matrix.\n\n The effect of the new transformation is to\n first scale the coordinates by :obj:`sx` and :obj:`sy`,\n then apply the original transformation to the coordinates.\n\n If :obj:`sy` is omitted, it is the same as :obj:`sx`\n so that scaling preserves aspect ratios.\n\n .. note::\n This changes the matrix in-place.\n\n :param sx: Scale factor in the X direction.\n :param sy: Scale factor in the Y direction.\n :type sx: float\n :type sy: float\n\n "
] |
def transform_point(self, x, y):
    """Transform the point ``(x, y)`` by this matrix.

    :returns: A ``(new_x, new_y)`` tuple of floats.
    """
    pair = ffi.new('double[2]', [x, y])
    cairo.cairo_matrix_transform_point(self._pointer, pair + 0, pair + 1)
    return (pair[0], pair[1])
|
[
"Transforms the point ``(x, y)`` by this matrix.\n\n :param x: X position.\n :param y: Y position.\n :type x: float\n :type y: float\n :returns: A ``(new_x, new_y)`` tuple of floats.\n\n "
] |
def transform_distance(self, dx, dy):
    """Transform the distance vector ``(dx, dy)`` by this matrix.

    Like :meth:`transform_point`, except the translation components of
    the matrix are ignored.

    :returns: A ``(new_dx, new_dy)`` tuple of floats.
    """
    pair = ffi.new('double[2]', [dx, dy])
    cairo.cairo_matrix_transform_distance(self._pointer, pair + 0, pair + 1)
    return (pair[0], pair[1])
|
[
"Transforms the distance vector ``(dx, dy)`` by this matrix.\n This is similar to :meth:`transform_point`\n except that the translation components of the transformation\n are ignored.\n The calculation of the returned vector is as follows::\n\n dx2 = dx1 * xx + dy1 * xy\n dy2 = dx1 * yx + dy1 * yy\n\n Affine transformations are position invariant,\n so the same vector always transforms to the same vector.\n If ``(x1, y1)`` transforms to ``(x2, y2)``\n then ``(x1 + dx1, y1 + dy1)`` will transform\n to ``(x1 + dx2, y1 + dy2)`` for all values of ``x1`` and ``x2``.\n\n :param dx: X component of a distance vector.\n :param dy: Y component of a distance vector.\n :type dx: float\n :type dy: float\n :returns: A ``(new_dx, new_dy)`` tuple of floats.\n\n "
] |
def toggleTransparency(self, force_value=None):
    """Toggle or set theme transparency, then re-apply the active theme.

    :param force_value: True/False to set transparency explicitly,
        or None to toggle the current value.
    """
    self._transparent = (not self._transparent
                         if force_value is None else force_value)
    self.restoreActiveTheme()
|
[
" Toggles theme trasparency.\n\n force_value will set trasparency if True or False,\n or toggle trasparency if None\n "
] |
Please provide a description of the function:def keypress(self, char):
# Handle one key press in the theme-selection window.
# Returns (theme_id, save_theme):
#   theme_id : 0.. = index in self._themes, -1 = exit/cancel, -2 = stay open
#   save_theme: True when the theme should also be saved in the config.
if char in (curses.KEY_ENTER, ord('\n'),
ord('\r'), ord('l'),
curses.KEY_RIGHT):
# ENTER / l / RIGHT: apply the selected theme (not marked for saving).
self._applied_theme = self._selection
self._applied_theme_name = self._themes[self._selection][0]
#if self.changed_from_config:
# self._config_theme = self._selection
# self._config_theme_name = self._themes[self._selection][0]
self.refresh()
return self._selection, False
elif char in (ord(' '), ord('s')):
# SPACE / s: apply the theme; mark it for saving unless the active
# theme was already changed away from the configured one.
self._applied_theme = self._selection
self._applied_theme_name = self._themes[self._selection][0]
if not self.changed_from_config:
self._config_theme = self._selection
self._config_theme_name = self._themes[self._selection][0]
if char == ord('s'):
# close window
curses.ungetch('q')
else:
self.refresh()
return self._selection, True
elif char in (curses.KEY_UP, ord('k')):
self.jumpnr = ''
self._go_up()
elif char in (curses.KEY_DOWN, ord('j')):
self.jumpnr = ''
self._go_down()
elif char in (curses.KEY_HOME, ord('g')):
self.jumpnr = ''
self._go_home()
elif char in (curses.KEY_END, ord('G')):
# G with a pending numeric prefix jumps to that theme (1-based).
if self.jumpnr == '':
self._go_end()
else:
num = int(self.jumpnr) - 1
if num >= 0:
self.selection = num
self.jumpnr = ''
elif char in (curses.KEY_NPAGE, ):
# Page down, wrapping from the last theme back to the first.
self.jumpnr = ''
sel = self._selection + self._page_jump
if self._selection == len(self._themes) - 1:
sel = 0
elif sel >= len(self._themes):
sel = len(self._themes) - 1
self.selection = sel
elif char in (curses.KEY_PPAGE, ):
# Page up, wrapping from the first theme to the last.
self.jumpnr = ''
sel = self._selection - self._page_jump
if self._selection == 0:
sel = len(self._themes) - 1
elif sel < 0:
sel = 0
self.selection = sel
elif char in map(ord,map(str,range(0,10))):
# Accumulate a numeric jump target (used by G above).
self.jumpnr += chr(char)
elif char in (curses.KEY_EXIT, 27, ord('q'), ord('h'), curses.KEY_LEFT):
# ESCAPE / q / h / LEFT: cancel. A lone ESC (no following byte on a
# non-blocking read) restores the configured theme before leaving.
self.jumpnr = ''
self._win.nodelay(True)
char = self._win.getch()
self._win.nodelay(False)
if char == -1:
if not self.changed_from_config:
if self._applied_theme_name != self._config_theme_name:
if logger.isEnabledFor(logging.INFO):
logger.info('Restoring saved theme: {}'.format(self._config_theme_name))
self._theme.readAndApplyTheme(self._config_theme_name)
self._applied_theme = self._config_theme
self._applied_theme_name = self._config_theme_name
self.selection = -1
return -1, False
return -2, False
|
[
" returns theme_id, save_theme\n return_id\n 0-.. : id in self._theme\n -1 : end or canel\n -2 : go no\n save_them\n True : theme is to be saved in config\n False : theme is not to be saved in config\n ",
" ESCAPE "
] |
def probePlayer(requested_player=''):
    """Probe the multimedia players available on the host system.

    :param requested_player: optional comma-separated list of player
        commands to try in order of preference ('vlc' maps to 'cvlc').
    :returns: the first working player class, or None if none works.
    """
    ret_player = None
    if logger.isEnabledFor(logging.INFO):
        logger.info("Probing available multimedia players...")
    implementedPlayers = Player.__subclasses__()
    if logger.isEnabledFor(logging.INFO):
        logger.info("Implemented players: " +
                    ", ".join([player.PLAYER_CMD
                               for player in implementedPlayers]))
    if requested_player:
        for r_player in requested_player.split(','):
            if r_player == 'vlc':
                # vlc's command line binary is cvlc
                r_player = 'cvlc'
            for player in implementedPlayers:
                if player.PLAYER_CMD == r_player:
                    ret_player = check_player(player)
                    if ret_player is not None:
                        return ret_player
            if ret_player is None:
                if logger.isEnabledFor(logging.INFO):
                    logger.info('Requested player "{}" not supported'.format(r_player))
    else:
        # No preference given: take the first player that checks out.
        for player in implementedPlayers:
            ret_player = check_player(player)
            if ret_player is not None:
                break
    return ret_player
|
[
" Probes the multimedia players which are available on the host\n system."
] |
Please provide a description of the function:def play(self, name, streamUrl, encoding = ''):
# Use a multimedia player subprocess to play a stream.
#   name      : station name (display / logging only)
#   streamUrl : stream or playlist URL
#   encoding  : station text encoding; falls back to 'utf-8'
self.close()
self.name = name
self.oldUserInput = {'Input': '', 'Volume': '', 'Title': ''}
self.muted = False
self.show_volume = True
self.title_prefix = ''
self.playback_is_on = False
self.outputStream.write('Station: "{}"'.format(name), self.status_update_lock)
if logger.isEnabledFor(logging.INFO):
logger.info('Selected Station: "{}"'.format(name))
if encoding:
self._station_encoding = encoding
else:
self._station_encoding = 'utf-8'
opts = []
# a URL whose path (before any query string) ends in m3u/pls is a playlist
isPlayList = streamUrl.split("?")[0][-3:] in ['m3u', 'pls']
opts = self._buildStartOpts(streamUrl, isPlayList)
self.process = subprocess.Popen(opts, shell=False,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT)
# reader thread: consumes the player's output and updates status
t = threading.Thread(target=self.updateStatus, args=(self.status_update_lock, ))
t.start()
# start playback check timer thread
try:
self.connection_timeout_thread = threading.Timer(self.playback_timeout, self.playback_timeout_handler)
self.connection_timeout_thread.start()
except:
self.connection_timeout_thread = None
if (logger.isEnabledFor(logging.ERROR)):
logger.error("playback detection thread start failed")
if logger.isEnabledFor(logging.INFO):
logger.info("Player started")
|
[
" use a multimedia player to play a stream "
] |
def _sendCommand(self, command):
    """Send a keystroke command to the player's stdin."""
    if self.process is None:
        return
    try:
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug("Command: {}".format(command).strip())
        self.process.stdin.write(command.encode('utf-8', 'replace'))
        self.process.stdin.flush()
    except:
        # Best effort: the player may have died; just log the failure.
        msg = "Error when sending: {}"
        if logger.isEnabledFor(logging.ERROR):
            logger.error(msg.format(command).strip(), exc_info=True)
|
[
" send keystroke command to player "
] |
def close(self):
    """Shut the player down: stop playback, cancel helper threads and
    kill the player subprocess."""
    self._no_mute_on_stop_playback()
    # Ask the player to stop first...
    self._stop()
    # ...then cancel timers and, as a fallback, terminate the process.
    if self.connection_timeout_thread is not None:
        self.connection_timeout_thread.cancel()
    if self.delay_thread is not None:
        self.delay_thread.cancel()
    if self.process is not None:
        os.kill(self.process.pid, 15)  # 15 == SIGTERM
        self.process.wait()
        self.process = None
|
[
" exit pyradio (and kill player instance) "
] |
def toggleMute(self):
    """Mute or unmute the player and refresh the displayed title."""
    # _mute() must run while self.muted still holds the OLD state,
    # since player backends read it to decide what to do.
    was_muted = self.muted
    if was_muted:
        self._mute()
        self.title_prefix = ''
        self.muted = False
        self.show_volume = True
    else:
        self._mute()
        if self.delay_thread is not None:
            self.delay_thread.cancel()
        self.title_prefix = '[Muted] '
        self.muted = True
        self.show_volume = False
    title = (self.oldUserInput['Input'] if self.oldUserInput['Title'] == ''
             else self.oldUserInput['Title'])
    self.outputStream.write(self.title_prefix + self._format_title_string(title))
|
[
" mute / unmute player "
] |
Please provide a description of the function:def _configHasProfile(self):
for i, config_file in enumerate(self.config_files):
if os.path.exists(config_file):
with open(config_file) as f:
config_string = f.read()
if "[pyradio]" in config_string:
if i == 0:
self.PROFILE_FROM_USER = True
return 1
return 0
|
[
" Checks if mpv config has [pyradio] entry / profile.\n\n Profile example:\n\n [pyradio]\n volume-max=300\n volume=50"
] |
def _buildStartOpts(self, streamUrl, playList=False):
    """Build the mpv argv to pass to subprocess.

    Newer mpv versions take --input-ipc-server for the control socket,
    older ones --input-unix-socket; probe which one is supported first.
    """
    probe = subprocess.Popen([self.PLAYER_CMD, "--input-ipc-server"],
                             stdout=subprocess.PIPE,
                             stdin=subprocess.PIPE, shell=False)
    out = probe.communicate()
    if "not found" not in str(out[0]):
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug("--input-ipc-server is supported.")
        socket_opt = "--input-ipc-server=/tmp/mpvsocket"
    else:
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug("--input-ipc-server is not supported.")
        socket_opt = "--input-unix-socket=/tmp/mpvsocket"
    if playList:
        opts = [self.PLAYER_CMD, "--quiet", "--playlist", streamUrl, socket_opt]
    else:
        opts = [self.PLAYER_CMD, "--quiet", streamUrl, socket_opt]
    if self.USE_PROFILE == -1:
        self.USE_PROFILE = self._configHasProfile()
    if self.USE_PROFILE == 1:
        opts.append("--profile=pyradio")
        if (logger.isEnabledFor(logging.DEBUG)):
            logger.debug("using profile [pyradio]")
    return opts
|
[
" Builds the options to pass to subprocess.",
" Test for newer MPV versions as it supports different IPC flags. "
] |
Please provide a description of the function:def _format_title_string(self, title_string):
return self._title_string_format_text_tag(title_string.replace(self.icy_tokkens[0], self.icy_title_prefix))
|
[
" format mpv's title "
] |
Please provide a description of the function:def _buildStartOpts(self, streamUrl, playList=False):
if playList:
opts = [self.PLAYER_CMD, "-quiet", "-playlist", streamUrl]
else:
opts = [self.PLAYER_CMD, "-quiet", streamUrl]
if self.USE_PROFILE == -1:
self.USE_PROFILE = self._configHasProfile()
if self.USE_PROFILE == 1:
opts.append("-profile")
opts.append("pyradio")
if (logger.isEnabledFor(logging.DEBUG)):
logger.debug("using profile [pyradio]")
return opts
|
[
" Builds the options to pass to subprocess."
] |
Please provide a description of the function:def _format_title_string(self, title_string):
if "StreamTitle='" in title_string:
tmp = title_string[title_string.find("StreamTitle='"):].replace("StreamTitle='", self.icy_title_prefix)
ret_string = tmp[:tmp.find("';")]
else:
ret_string = title_string
if '"artist":"' in ret_string:
ret_string = self.icy_title_prefix + ret_string[ret_string.find('"artist":')+10:].replace('","title":"', ' - ').replace('"}\';', '')
return self._title_string_format_text_tag(ret_string)
|
[
" format mplayer's title ",
" work on format:\n ICY Info: START_SONG='{\"artist\":\"Clelia Cafiero\",\"title\":\"M. Mussorgsky-Quadri di un'esposizione\"}';\n Fund on \"ClassicaViva Web Radio: Classical\"\n "
] |
Please provide a description of the function:def _format_volume_string(self, volume_string):
return '[' + volume_string[volume_string.find(self.volume_string):].replace(' %','%').replace('ume', '')+'] '
|
[
" format mplayer's volume "
] |
Please provide a description of the function:def _buildStartOpts(self, streamUrl, playList=False):
#opts = [self.PLAYER_CMD, "-Irc", "--quiet", streamUrl]
opts = [self.PLAYER_CMD, "-Irc", "-vv", streamUrl]
return opts
|
[
" Builds the options to pass to subprocess."
] |
def _mute(self):
    """Toggle VLC mute: restore the saved volume when muted, otherwise
    remember the current volume and set it to 0."""
    if self.muted:
        self._sendCommand("volume {}\n".format(self.actual_volume))
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug('VLC unmuted: {0} ({1}%)'.format(
                self.actual_volume,
                int(100 * self.actual_volume / self.max_volume)))
    else:
        if self.actual_volume == -1:
            # volume not known yet: ask vlc for it before zeroing
            self._get_volume()
        self._sendCommand("volume 0\n")
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug('VLC muted: 0 (0%)')
|
[
" mute vlc "
] |
Please provide a description of the function:def _format_volume_string(self, volume_string):
self.actual_volume = int(volume_string.split(self.volume_string)[1].split(',')[0].split()[0])
return '[Vol: {}%] '.format(int(100 * self.actual_volume / self.max_volume))
|
[
" format vlc's volume "
] |
Please provide a description of the function:def _format_title_string(self, title_string):
sp = title_string.split(self.icy_tokkens[0])
if sp[0] == title_string:
ret_string = title_string
else:
ret_string = self.icy_title_prefix + sp[1]
return self._title_string_format_text_tag(ret_string)
|
[
" format vlc's title "
] |
Please provide a description of the function:def _is_accepted_input(self, input_string):
ret = False
accept_filter = (self.volume_string, "http stream debug: ")
reject_filter = ()
for n in accept_filter:
if n in input_string:
ret = True
break
if ret:
for n in reject_filter:
if n in input_string:
ret = False
break
return ret
|
[
" vlc input filtering "
] |
Please provide a description of the function:def _no_mute_on_stop_playback(self):
# Make sure VLC does not terminate muted (volume 0 would persist).
if self.ctrl_c_pressed:
return
if self.isPlaying():
if self.actual_volume == -1:
self._get_volume()
# NOTE(review): busy-wait until actual_volume is filled in —
# presumably by the output-reader thread; confirm, and consider
# a small sleep to avoid spinning a full CPU core here.
while self.actual_volume == -1:
pass
if self.actual_volume == 0:
# raise a zero volume to 25% so the next start is audible
self.actual_volume = int(self.max_volume*0.25)
self._sendCommand('volume {}\n'.format(self.actual_volume))
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Unmuting VLC on exit: {} (25%)'.format(self.actual_volume))
elif self.muted:
# not playing but muted: restore the previously saved volume
if self.actual_volume > 0:
self._sendCommand('volume {}\n'.format(self.actual_volume))
if logger.isEnabledFor(logging.DEBUG):
logger.debug('VLC volume restored on exit: {0} ({1}%)'.format(self.actual_volume, int(100 * self.actual_volume / self.max_volume)))
self.show_volume = True
|
[
" make sure vlc does not stop muted "
] |
def shell():
    """Command-line entry point: parse arguments, read the configuration
    and the selected playlist, then start the curses interface.

    Sets ESCDELAY to 25ms so the ESC key responds without curses' default
    delay (see https://stackoverflow.com/questions/27372068).
    """
    # Require python 2.7 or 3.5+.
    version_too_old = False
    if sys.version_info[0] == 2:
        if sys.version_info < (2, 7):
            version_too_old = True
    elif sys.version_info.major == 3 and sys.version_info < (3, 5):
        version_too_old = True
    if version_too_old:
        print('Pyradio requires python 2.7 or 3.5+...')
        sys.exit(1)
    # set window title
    try:
        sys.stdout.write("\x1b]2;PyRadio: The Internet Radio player\x07")
    except:
        pass
    requested_player = ''
    parser = ArgumentParser(description="Curses based Internet radio player")
    parser.add_argument("-s", "--stations", default='',
                        help="Use specified station CSV file.")
    parser.add_argument("-p", "--play", nargs='?', default='False',
                        help="Start and play."
                        "The value is num station or empty for random.")
    parser.add_argument("-u", "--use-player", default='',
                        help="Use specified player. "
                        "A comma-separated list can be used to specify detection order. "
                        "Supported players: mpv, mplayer, vlc.")
    parser.add_argument("-a", "--add", action='store_true',
                        help="Add station to list.")
    parser.add_argument("-ls", "--list-playlists", action='store_true',
                        help="List of available playlists in config dir.")
    parser.add_argument("-l", "--list", action='store_true',
                        help="List of available stations in a playlist.")
    parser.add_argument("-t", "--theme", default='', help="Use specified theme. ")
    parser.add_argument("-scd", "--show-config-dir", action='store_true',
                        help="Print config directory location and exit.")
    parser.add_argument("-ocd", "--open-config-dir", action='store_true',
                        help="Open config directory with default file manager.")
    parser.add_argument("-d", "--debug", action='store_true',
                        help="Start pyradio in debug mode.")
    args = parser.parse_args()
    sys.stdout.flush()
    pyradio_config = PyRadioConfig()
    # Informational actions that exit immediately.
    if args.show_config_dir:
        print('PyRadio config dir: "{}"'.format(pyradio_config.stations_dir))
        sys.exit()
    if args.open_config_dir:
        open_conf_dir(pyradio_config)
        sys.exit()
    if args.list_playlists:
        pyradio_config.list_playlists()
        sys.exit()
    if args.list is False and args.add is False:
        print('Reading config...')
        ret = pyradio_config.read_config()
        if ret == -1:
            print('Error opening config: "{}"'.format(pyradio_config.config_file))
            sys.exit(1)
        elif ret == -2:
            print('Config file is malformed: "{}"'.format(pyradio_config.config_file))
            sys.exit(1)
    if args.use_player != '':
        requested_player = args.use_player
    if args.list is False and args.add is False:
        print('Reading playlist...')
        sys.stdout.flush()
        ret = pyradio_config.read_playlist_file(args.stations)
        if ret < 0:
            print_playlist_selection_error(args.stations, pyradio_config, ret)
    # No need to parse the file if we add station
    # Actually we do need to do so now, so that we
    # handle 2-column vs. 3-column playlists
    if args.add:
        if sys.version_info < (3, 0):
            params = raw_input("Enter the name: "), raw_input("Enter the url: "), raw_input("Enter the encoding (leave empty for 'utf-8'): ")
        else:
            params = input("Enter the name: "), input("Enter the url: "), input("Enter the encoding (leave empty for 'utf-8'): ")
        msg = ('name', 'url')
        for i, a_param in enumerate(params):
            if i < 2:
                if a_param.strip() == '':
                    print('** Error: No {} entered. Aborting...'.format(msg[i]))
                    sys.exit(1)
        ret = pyradio_config.append_station(params, args.stations)
        if ret < 0:
            print_playlist_selection_error(args.stations, pyradio_config, ret)
        sys.exit()
    if args.list:
        header_format_string, format_string = get_format_string(pyradio_config.stations)
        header_string = header_format_string.format('[Name]','[URL]','[Encoding]')
        print(header_string)
        print(len(header_string) * '-')
        for num, a_station in enumerate(pyradio_config.stations):
            if a_station[2]:
                encoding = a_station[2]
            else:
                encoding = pyradio_config.default_encoding
            print(format_string.format(str(num+1), a_station[0], a_station[1], encoding))
        sys.exit()
    if args.debug:
        __configureLogger()
        print('Debug mode activated; printing messages to file: "~/pyradio.log"')
    else:
        ''' Refer to https://docs.python.org/3.7/howto/logging.html
            section "What happens if no configuration is provided"
        '''
        logging.raiseExceptions = False
        logging.lastResort = None
    # BUGFIX: was `requested_player is ''` — identity comparison with a
    # string literal is unreliable (and a SyntaxWarning on CPython 3.8+).
    if requested_player == '':
        requested_player = pyradio_config.player
    #else:
    #    pyradio_config.requested_player = requested_player
    if args.play == 'False':
        if args.stations == '':
            args.play = pyradio_config.default_station
    if args.play == '-1':
        args.play = 'False'
    theme_to_use = args.theme
    if not theme_to_use:
        theme_to_use = pyradio_config.theme
    # Starts the radio gui.
    pyradio = PyRadio(pyradio_config,
                      play=args.play,
                      req_player=requested_player,
                      theme=theme_to_use)
    ''' Setting ESCAPE key delay to 25ms
        Refer to: https://stackoverflow.com/questions/27372068/why-does-the-escape-key-have-a-delay-in-python-curses'''
    environ.setdefault('ESCDELAY', '25')
    curses.wrapper(pyradio.setup)
    if not pyradio.setup_return_status:
        print('\nThis terminal can not display colors.\nPyRadio cannot function in such a terminal.\n')
|
[
" Setting ESCAPE key delay to 25ms\n Refer to: https://stackoverflow.com/questions/27372068/why-does-the-escape-key-have-a-delay-in-python-curses"
] |
def _move_old_csv(self, usr):
    """Relocate a legacy ``~/.pyradio`` file into the user config folder.

    The file becomes ``<usr>/stations.csv``, or ``<usr>/pyradio.csv`` if
    stations.csv already exists there; the original is then removed.
    """
    src = path.join(getenv('HOME', '~'), '.pyradio')
    if not (path.exists(src) and path.isfile(src)):
        return
    if path.exists(path.join(usr, 'stations.csv')):
        target = path.join(usr, 'pyradio.csv')
    else:
        target = path.join(usr, 'stations.csv')
    copyfile(src, target)
    try:
        remove(src)
    except:
        # best effort: leaving the old file behind is harmless
        pass
|
[
" if a ~/.pyradio files exists, relocate it in user\n config folder and rename it to stations.csv, or if\n that exists, to pyradio.csv "
] |
def _check_stations_csv(self, usr, root):
    """Place a user-editable copy of stations.csv in the config dir.

    Having the copy under the user's home means no elevated rights are
    needed to add or edit stations.
    """
    user_copy = path.join(usr, 'stations.csv')
    if not path.exists(user_copy):
        copyfile(root, user_copy)
|
[] |
def copy_playlist_to_config_dir(self):
    """Copy a foreign playlist into the config dir.

    :returns: 0 on success, 1 if the playlist had to be renamed
        (name clash, timestamp prefix added), -1 (via None) on copy error.
    """
    ret = 0
    target = path.join(self.stations_dir, self.stations_filename_only)
    if path.exists(target):
        # name clash: prefix the copy with a timestamp
        ret = 1
        stamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S_")
        target = path.join(self.stations_dir,
                           stamp + self.stations_filename_only)
    try:
        copyfile(self.stations_file, target)
    except:
        if logger.isEnabledFor(logging.ERROR):
            logger.error('Cannot copy playlist: "{}"'.format(self.stations_file))
        ret = -1
        return
    self._get_playlist_elements(target)
    self.foreign_file = False
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('Playlist copied to: "{}"'.format(self.stations_filename_only_no_extension))
    return ret
|
[
" Copy a foreign playlist in config dir\n Returns:\n -1: error copying file\n 0: success\n 1: playlist renamed\n "
] |
Please provide a description of the function:def _is_playlist_in_config_dir(self):
if path.dirname(self.stations_file) == self.stations_dir:
self.foreign_file = False
self.foreign_filename_only_no_extension = ''
else:
self.foreign_file = True
self.foreign_filename_only_no_extension = self.stations_filename_only_no_extension
self.foreign_copy_asked = False
|
[
" Check if a csv file is in the config dir "
] |
Please provide a description of the function:def _get_playlist_abspath_from_data(self, stationFile=''):
ret = -1
orig_input = stationFile
if stationFile:
if stationFile.endswith('.csv'):
stationFile = path.abspath(stationFile)
else:
stationFile += '.csv'
stationFile = path.join(self.stations_dir, stationFile)
if path.exists(stationFile) and path.isfile(stationFile):
return stationFile, 0
else:
for p in [path.join(self.stations_dir, 'pyradio.csv'),
path.join(self.stations_dir, 'stations.csv'),
self.root_path]:
if path.exists(p) and path.isfile(p):
return p, 0
if ret == -1:
if orig_input.replace('-', '').isdigit():
sel = int(orig_input) - 1
if sel == -1:
stationFile = path.join(self.stations_dir, 'stations.csv')
return stationFile, 0
elif sel < 0:
return '', -3
else:
n, f = self.read_playlists()
if sel <= n-1:
stationFile=self.playlists[sel][-1]
return stationFile, 0
else:
return '', -4
else:
return '', -2
|
[
" Get playlist absolute path\n Returns: playlist path, result\n Result is:\n 0 - playlist found\n -2 - playlist not found\n -3 - negative number specified\n -4 - number not found\n ",
" relative or absolute path ",
" try to find it in config dir ",
" Check if playlist number was specified ",
" negative playlist number ",
" playlist number sel does not exit "
] |
def read_playlist_file(self, stationFile=''):
    """Read a playlist CSV file into ``self.stations``.

    :returns: the number of stations, or a negative error code:
        -1 playlist malformed, -2.. errors from path resolution.
    """
    prev_file = self.stations_file
    prev_format = self.new_format
    self.new_format = False
    stationFile, ret = self._get_playlist_abspath_from_data(stationFile)
    if ret < 0:
        return ret
    self._reading_stations = []
    with open(stationFile, 'r') as cfgfile:
        try:
            rows = csv.reader(
                filter(lambda line: line[0] != '#', cfgfile),
                skipinitialspace=True)
            for row in rows:
                if not row:
                    continue
                try:
                    # old two-column format: name, url
                    name, url = [s.strip() for s in row]
                    self._reading_stations.append((name, url, ''))
                except:
                    # new three-column format: name, url, encoding
                    name, url, enc = [s.strip() for s in row]
                    self._reading_stations.append((name, url, enc))
                    self.new_format = True
        except:
            # malformed playlist: roll back and report
            self._reading_stations = []
            self.new_format = prev_format
            return -1
    self.stations = list(self._reading_stations)
    self._reading_stations = []
    self._get_playlist_elements(stationFile)
    self.previous_stations_file = prev_file
    self._is_playlist_in_config_dir()
    self.number_of_stations = len(self.stations)
    self.dirty_playlist = False
    if logger.isEnabledFor(logging.DEBUG):
        if self.new_format:
            logger.debug('Playlist is in new format')
        else:
            logger.debug('Playlist is in old format')
    return self.number_of_stations
|
[
" Read a csv file\n Returns: number\n x - number of stations or\n -1 - playlist is malformed\n -2 - playlist not found\n "
] |
Please provide a description of the function:def _playlist_format_changed(self):
new_format = False
for n in self.stations:
if n[2] != '':
new_format = True
break
if self.new_format == new_format:
return False
else:
return True
|
[
" Check if we have new or old format\n and report if format has changed\n\n Format type can change by editing encoding,\n deleting a non-utf-8 station etc.\n "
] |
def save_playlist_file(self, stationFile=''):
    """Save the playlist: write stations to a temporary .txt file, then
    move it onto the target .csv file.

    :returns: 0 on success, -1 write error, -2 rename error.
    """
    if self._playlist_format_changed():
        self.dirty_playlist = True
        self.new_format = not self.new_format
    st_file = stationFile if stationFile else self.stations_file
    if not self.dirty_playlist:
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug('Playlist not modified...')
        return 0
    st_new_file = st_file.replace('.csv', '.txt')
    # Prepend a header comment row (column count matches the format).
    if self.new_format:
        header = ['# Find lots more stations at http://www.iheart.com', '', '']
    else:
        header = ['# Find lots more stations at http://www.iheart.com', '']
    tmp_stations = [header] + self.stations[:]
    try:
        with open(st_new_file, 'w') as cfgfile:
            writter = csv.writer(cfgfile)
            for a_station in tmp_stations:
                writter.writerow(self._format_playlist_row(a_station))
    except:
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug('Cannot open playlist file for writing,,,')
        return -1
    try:
        move(st_new_file, st_file)
    except:
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug('Cannot rename playlist file...')
        return -2
    self.dirty_playlist = False
    return 0
|
[
" Save a playlist\n Create a txt file and write stations in it.\n Then rename it to final target\n\n return 0: All ok\n -1: Error writing file\n -2: Error renaming file\n "
] |
Please provide a description of the function:def _bytes_to_human(self, B):
''' Return the given bytes as a human friendly KB, MB, GB, or TB string '''
KB = float(1024)
MB = float(KB ** 2) # 1,048,576
GB = float(KB ** 3) # 1,073,741,824
TB = float(KB ** 4) # 1,099,511,627,776
if B < KB:
return '{0} B'.format(B)
B = float(B)
if KB <= B < MB:
return '{0:.2f} KB'.format(B/KB)
elif MB <= B < GB:
return '{0:.2f} MB'.format(B/MB)
elif GB <= B < TB:
return '{0:.2f} GB'.format(B/GB)
elif TB <= B:
return '{0:.2f} TB'.format(B/TB)
|
[] |
Please provide a description of the function:def append_station(self, params, stationFile=''):
if self.new_format:
if stationFile:
st_file = stationFile
else:
st_file = self.stations_file
st_file, ret = self._get_playlist_abspath_from_data(st_file)
if ret < -1:
return ret
try:
with open(st_file, 'a') as cfgfile:
writter = csv.writer(cfgfile)
writter.writerow(params)
return 0
except:
return -5
else:
self.stations.append([ params[0], params[1], params[2] ])
self.dirty_playlist = True
st_file, ret = self._get_playlist_abspath_from_data(stationFile)
if ret < -1:
return ret
ret = self.save_playlist_file(st_file)
if ret < 0:
ret -= 4
return ret
|
[
" Append a station to csv file\n\n return 0: All ok\n -2 - playlist not found\n -3 - negative number specified\n -4 - number not found\n -5: Error writing file\n -6: Error renaming file\n "
] |
Please provide a description of the function:def read_playlists(self):
self.playlists = []
self.selected_playlist = -1
files = glob.glob(path.join(self.stations_dir, '*.csv'))
if len(files) == 0:
return 0, -1
else:
for a_file in files:
a_file_name = ''.join(path.basename(a_file).split('.')[:-1])
a_file_size = self._bytes_to_human(path.getsize(a_file))
a_file_time = ctime(path.getmtime(a_file))
self.playlists.append([a_file_name, a_file_time, a_file_size, a_file])
self.playlists.sort()
for i, a_playlist in enumerate(self.playlists):
if a_playlist[-1] == self.stations_file:
self.selected_playlist = i
break
return len(self.playlists), self.selected_playlist
|
[
" get already loaded playlist id "
] |
Please provide a description of the function:def _check_config_file(self, usr):
''' Make sure a config file exists in the config dir '''
package_config_file = path.join(path.dirname(__file__), 'config')
user_config_file = path.join(usr, 'config')
''' restore config from bck file '''
if path.exists(user_config_file + '.restore'):
try:
copyfile(user_config_file + '.restore', user_config_file)
remove(self.user_config_file + '.restore')
except:
pass
''' Copy package config into user dir '''
if not path.exists(user_config_file):
copyfile(package_config_file, user_config_file)
|
[] |
Please provide a description of the function:def save_config(self):
if not self.opts['dirty_config'][1]:
if logger.isEnabledFor(logging.INFO):
logger.info('Config not saved (not modified)')
return 1
txt ='''# PyRadio Configuration File
# Player selection
# This is the equivalent to the -u , --use-player command line parameter
# Specify the player to use with PyRadio, or the player detection order
# Example:
# player = vlc
# or
# player = vlc,mpv, mplayer
# Default value: mpv,mplayer,vlc
player = {0}
# Default playlist
# This is the playlist to open if none is specified
# You can scecify full path to CSV file, or if the playlist is in the
# config directory, playlist name (filename without extension) or
# playlist number (as reported by -ls command line option)
# Default value: stations
default_playlist = {1}
# Default station
# This is the equivalent to the -p , --play command line parameter
# The station number within the default playlist to play
# Value is 1..number of stations, "-1" or "False" means no auto play
# "0" or "Random" means play a random station
# Default value: False
default_station = {2}
# Default encoding
# This is the encoding used by default when reading data provided by
# a station (such as song title, etc.) If reading said data ends up
# in an error, 'utf-8' will be used instead.
#
# A valid encoding list can be found at:
# https://docs.python.org/2.7/library/codecs.html#standard-encodings
# replacing 2.7 with specific version:
# 3.0 up to current python version.
#
# Default value: utf-8
default_encoding = {3}
# Connection timeout
# PyRadio will wait for this number of seconds to get a station/server
# message indicating that playback has actually started.
# If this does not happen (within this number of seconds after the
# connection is initiated), PyRadio will consider the station
# unreachable, and display the "Failed to connect to: [station]"
# message.
#
# Valid values: 5 - 60
# Default value: 10
connection_timeout = {4}
# Default theme
# Hardcooded themes:
# dark (default) (8 colors)
# light (8 colors)
# dark_16_colors (16 colors dark theme alternative)
# light_16_colors (16 colors light theme alternative)
# black_on_white (bow) (256 colors)
# white_on_black (wob) (256 colors)
# Default value = 'dark'
theme = {5}
# Transparency setting
# If False, theme colors will be used.
# If True and a compositor is running, the stations' window
# background will be transparent. If True and a compositor is
# not running, the terminal's background color will be used.
# Valid values: True, true, False, false
# Default value: False
use_transparency = {6}
# Playlist management
#
# Specify whether you will be asked to confirm
# every station deletion action
# Valid values: True, true, False, false
# Default value: True
confirm_station_deletion = {7}
# Specify whether you will be asked to confirm
# playlist reloading, when the playlist has not
# been modified within Pyradio
# Valid values: True, true, False, false
# Default value: True
confirm_playlist_reload = {8}
# Specify whether you will be asked to save a
# modified playlist whenever it needs saving
# Valid values: True, true, False, false
# Default value: False
auto_save_playlist = {9}
'''
copyfile(self.config_file, self.config_file + '.restore')
if self.opts['default_station'][1] is None:
self.opts['default_station'][1] = '-1'
try:
with open(self.config_file, 'w') as cfgfile:
cfgfile.write(txt.format(self.opts['player'][1],
self.opts['default_playlist'][1],
self.opts['default_station'][1],
self.opts['default_encoding'][1],
self.opts['connection_timeout'][1],
self.opts['theme'][1],
self.opts['use_transparency'][1],
self.opts['confirm_station_deletion'][1],
self.opts['confirm_playlist_reload'][1],
self.opts['auto_save_playlist'][1]))
except:
if logger.isEnabledFor(logging.ERROR):
logger.error('Error saving config')
return -1
try:
remove(self.config_file + '.restore')
except:
pass
if logger.isEnabledFor(logging.INFO):
logger.info('Config saved')
self.opts['dirty_config'][1] = False
return 0
|
[
" Save config file\n\n Creates config.restore (back up file)\n Returns:\n -1: Error saving config\n 0: Config saved successfully\n 1: Config not saved (not modified"
] |
Please provide a description of the function:def initBody(self):
#self.bodyWin.timeout(100)
#self.bodyWin.keypad(1)
self.bodyMaxY, self.bodyMaxX = self.bodyWin.getmaxyx()
self.bodyWin.noutrefresh()
if self.operation_mode == NO_PLAYER_ERROR_MODE:
if self.requested_player:
txt =
else:
txt =
self.refreshNoPlayerBody(txt)
else:
#if self.operation_mode == MAIN_HELP_MODE:
# self.operation_mode = self.window_mode = NORMAL_MODE
# self.helpWin = None
# if logger.isEnabledFor(logging.DEBUG):
# logger.debug('MODE: MAIN_HELP_MODE => NORMAL_MODE')
#elif self.operation_mode == PLAYLIST_HELP_MODE:
# self.operation_mode = self.window_mode = PLAYLIST_MODE
# self.helpWin = None
# if logger.isEnabledFor(logging.DEBUG):
# logger.debug('MODE: PLAYLIST_HELP_MODE => PLAYLIST_MODE')
#elif self.operation_mode == THEME_HELP_MODE:
# self.operation_mode = self.window_mode = THEME_MODE
# self.helpWin = None
# if logger.isEnabledFor(logging.DEBUG):
# logger.debug('MODE: THEME_HELP_MODE => THEME_MODE')
# make sure selected is visible
max_lines = self.maxY - 4
if self.number_of_items < max_lines:
self.startPos = 0
elif not self.startPos <= self.selection < self.startPos + max_lines:
self._put_selection_in_the_middle()
self.refreshBody()
|
[
" Initializes the body/story window ",
"Rypadio is not able to use the player you specified.\n\n This means that either this particular player is not supported\n by PyRadio, or that you have simply misspelled its name.\n\n PyRadio currently supports three players: mpv, mplayer and vlc,\n automatically detected in this order.",
"PyRadio is not able to detect any players.\n\n PyRadio currently supports three players: mpv, mplayer and vlc,\n automatically detected in this order.\n\n Please install any one of them and try again.\n\n Please keep in mind that if mpv is installed, socat must be\n installed as well."
] |
Please provide a description of the function:def initFooter(self):
self.footerWin.bkgd(' ', curses.color_pair(7))
self.footerWin.noutrefresh()
|
[
" Initializes the body/story window "
] |
Please provide a description of the function:def ctrl_c_handler(self, signum, frame):
self.ctrl_c_pressed = True
if self._cnf.dirty_playlist:
self.saveCurrentPlaylist()
self._cnf.save_config()
|
[
" Try to auto save playlist on exit\n Do not check result!!! ",
" Try to auto save config on exit\n Do not check result!!! "
] |
Please provide a description of the function:def _goto_playing_station(self, changing_playlist=False):
if (self.player.isPlaying() or self.operation_mode == PLAYLIST_MODE) and \
(self.selection != self.playing or changing_playlist):
if changing_playlist:
self.startPos = 0
max_lines = self.bodyMaxY - 2
if logger.isEnabledFor(logging.INFO):
logger.info('max_lines = {0}, self.playing = {1}'.format(max_lines, self.playing))
if self.number_of_items < max_lines:
self.startPos = 0
elif self.playing < self.startPos or \
self.playing >= self.startPos + max_lines:
if logger.isEnabledFor(logging.INFO):
logger.info('=== _goto:adjusting startPos')
if self.playing < max_lines:
self.startPos = 0
if self.playing - int(max_lines/2) > 0:
self.startPos = self.playing - int(max_lines/2)
elif self.playing > self.number_of_items - max_lines:
self.startPos = self.number_of_items - max_lines
else:
self.startPos = int(self.playing+1/max_lines) - int(max_lines/2)
if logger.isEnabledFor(logging.INFO):
logger.info('===== _goto:startPos = {0}, changing_playlist = {1}'.format(self.startPos, changing_playlist))
self.selection = self.playing
self.refreshBody()
|
[
" make sure playing station is visible "
] |
Please provide a description of the function:def setStation(self, number):
# If we press up at the first station, we go to the last one
# and if we press down on the last one we go back to the first one.
if number < 0:
number = len(self.stations) - 1
elif number >= len(self.stations):
number = 0
self.selection = number
maxDisplayedItems = self.bodyMaxY - 2
if self.selection - self.startPos >= maxDisplayedItems:
self.startPos = self.selection - maxDisplayedItems + 1
elif self.selection < self.startPos:
self.startPos = self.selection
|
[
" Select the given station number "
] |
Please provide a description of the function:def stopPlayer(self, show_message=True):
try:
self.player.close()
except:
pass
finally:
self.playing = -1
if show_message:
self.log.write('{}: Playback stopped'.format(self._format_player_string()), thread_lock=None, help_msg=True)
|
[
" stop player "
] |
Please provide a description of the function:def _show_help(self, txt,
mode_to_set=MAIN_HELP_MODE,
caption=' Help ',
prompt=' Press any key to hide ',
too_small_msg='Window too small to show message',
is_message=False):
self.helpWinContainer = None
self.helpWin = None
self.operation_mode = mode_to_set
txt_col = curses.color_pair(5)
box_col = curses.color_pair(3)
caption_col = curses.color_pair(4)
lines = txt.split('\n')
st_lines = [item.replace('\r','') for item in lines]
lines = [item.strip() for item in st_lines]
inner_height = len(lines) + 2
inner_width = self._get_message_width_from_list(lines) + 4
outer_height = inner_height + 2
outer_width = inner_width + 2
if self.window_mode == CONFIG_MODE and \
self.operation_mode > CONFIG_HELP_MODE:
use_empty_win = True
height_to_use = outer_height
width_to_use = outer_width
else:
use_empty_win = False
height_to_use = inner_height
width_to_use = inner_width
if self.maxY - 2 < outer_height or self.maxX < outer_width:
txt = too_small_msg
inner_height = 3
inner_width = len(txt) + 4
if use_empty_win:
height_to_use = inner_height +2
width_to_use = inner_width + 2
else:
height_to_use = inner_height
width_to_use = inner_width
if self.maxX < width_to_use:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(' *** Window too small even to show help warning ***')
self.operation_mode = self.window_mode = NORMAL_MODE
return
lines = [ txt , ]
if use_empty_win:
self.helpWinContainer = curses.newwin(height_to_use,width_to_use,int((self.maxY-height_to_use)/2),int((self.maxX-width_to_use)/2))
self.helpWinContainer.bkgdset(' ', box_col)
self.helpWinContainer.erase()
self.helpWin = curses.newwin(inner_height,inner_width,int((self.maxY-inner_height)/2),int((self.maxX-inner_width)/2))
self.helpWin.bkgdset(' ', box_col)
self.helpWin.erase()
self.helpWin.box()
if is_message:
start_with = txt_col
follow = caption_col
else:
start_with = caption_col
follow = txt_col
if caption.strip():
self.helpWin.addstr(0, int((inner_width-len(caption))/2), caption, caption_col)
splited = []
for i, n in enumerate(lines):
a_line = self._replace_starting_undesscore(n)
if a_line.startswith('%'):
self.helpWin.move(i + 1, 0)
try:
self.helpWin.addstr('├', curses.color_pair(3))
self.helpWin.addstr('─' * (inner_width - 2), curses.color_pair(3))
self.helpWin.addstr('┤', curses.color_pair(3))
except:
self.helpWin.addstr('├'.encode('utf-8'), curses.color_pair(3))
self.helpWin.addstr('─'.encode('utf-8') * (inner_width - 2), curses.color_pair(3))
self.helpWin.addstr('┤'.encode('utf-8'), curses.color_pair(3))
self.helpWin.addstr(i + 1, inner_width-len(a_line[1:]) - 1, a_line[1:].replace('_', ' '), caption_col)
#self.helpWin.addstr(i + 1, int((inner_width-len(a_line[1:]))/2), a_line[1:].replace('_', ' '), caption_col)
else:
splited = a_line.split('|')
self.helpWin.move(i + 1, 2)
for part, part_string in enumerate(splited):
if part_string.strip():
if part == 0 or part % 2 == 0:
self.helpWin.addstr(splited[part], start_with)
else:
self.helpWin.addstr(splited[part], follow)
if prompt.strip():
self.helpWin.addstr(inner_height - 1, int(inner_width-len(prompt)-1), prompt)
if use_empty_win:
self.helpWinContainer.refresh()
self.helpWin.refresh()
|
[
" Display a help, info or question window. "
] |
Please provide a description of the function:def _format_playlist_line(self, lineNum, pad, station):
line = "{0}. {1}".format(str(lineNum + self.startPos + 1).rjust(pad), station[0])
f_data = ' [{0}, {1}]'.format(station[2], station[1])
if version_info < (3, 0):
if len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) > self.bodyMaxX -2:
f_data = ' [{0}]'.format(station[1])
if len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) > self.bodyMaxX - 2:
while len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) > self.bodyMaxX - 3:
f_data = f_data[:-1]
f_data += ']'
if len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) < self.maxX - 2:
while len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) < self.maxX - 2:
line += ' '
else:
if len(line) + len(f_data) > self.bodyMaxX -2:
f_data = ' [{0}]'.format(station[1])
if len(line) + len(f_data) > self.bodyMaxX - 2:
while len(line) + len(f_data) > self.bodyMaxX - 3:
f_data = f_data[:-1]
f_data += ']'
if len(line) + len(f_data) < self.maxX - 2:
while len(line) + len(f_data) < self.maxX - 2:
line += ' '
line += f_data
return line
|
[
" format playlist line so that if fills self.maxX ",
" this is too long, try to shorten it\n by removing file size ",
" still too long. start removing chars ",
" if too short, pad f_data to the right ",
" this is too long, try to shorten it\n by removing file size ",
" still too long. start removing chars ",
" if too short, pad f_data to the right "
] |
Please provide a description of the function:def _print_foreign_playlist_message(self):
self.operation_mode = self.window_mode = NORMAL_MODE
self.refreshBody()
txt='''A playlist by this name:
__"|{0}|"
already exists in the config directory.
This playlist was saved as:
__"|{1}|"
'''.format(self._cnf.foreign_filename_only_no_extension,
self._cnf.stations_filename_only_no_extension)
self._show_help(txt, FOREIGN_PLAYLIST_MESSAGE_MODE,
caption = ' Foreign playlist ',
prompt = ' Press any key ',
is_message=True)
|
[
" reset previous message ",
" display new message "
] |
Please provide a description of the function:def _print_foreign_playlist_copy_error(self):
self.operation_mode = self.window_mode = NORMAL_MODE
self.refreshBody()
txt ='''Foreign playlist copying |failed|!
Make sure the file is not open with another
application and try to load it again
'''
self._show_help(txt, FOREIGN_PLAYLIST_COPY_ERROR_MODE,
caption = ' Error ',
prompt = ' Press any key ',
is_message=True)
|
[
" reset previous message "
] |
Please provide a description of the function:def _align_stations_and_refresh(self, cur_mode):
need_to_scan_playlist = False
self.stations = self._cnf.stations
self.number_of_items = len(self.stations)
if self.number_of_items == 0:
if self.player.isPlaying():
self.stopPlayer()
self.playing,self.selection, self.stations, \
self.number_of_items = (-1, 0, 0, 0)
self.operation_mode = self.window_mode = NORMAL_MODE
self.refreshBody()
return
else:
#if logger.isEnabledFor(logging.DEBUG):
# logger.debug('self.playing = {}'.format(self.playing))
if cur_mode == REMOVE_STATION_MODE:
if self.player.isPlaying():
if self.selection == self.playing:
self.stopPlayer()
self.playing = -1
elif self.selection < self.playing:
self.playing -= 1
else:
self.playing = -1
if self.selection > self.number_of_items - self.bodyMaxY:
self.startPos -= 1
if self.selection >= self.number_of_items:
self.selection -= 1
if self.startPos < 0:
self.startPos = 0
else:
if self.player.isPlaying():
if self.playing > self.number_of_items - 1:
need_to_scan_playlist = True
else:
if self.stations[self.playing][0] == self.active_stations[1][0]:
self.selection = self._get_station_id(self.active_stations[0][0])
if logger.isEnabledFor(logging.DEBUG):
logger.debug('** Selected station is {0} at {1}'.format(self.stations[self.selection], self.selection))
else:
self.playing -= 1
if self.playing == -1:
self.playing = len(self.stations) - 1
if self.stations[self.playing][0] == self.active_stations[1][0]:
self.selection = self._get_station_id(self.active_stations[0][0])
if logger.isEnabledFor(logging.DEBUG):
logger.debug('** Selection station is {0} at {1}'.format(self.stations[self.playing], self.playing))
else:
need_to_scan_playlist = True
else:
need_to_scan_playlist = True
if need_to_scan_playlist:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Scanning playlist for stations...')
self.selection, self.playing = self._get_stations_ids((
self.active_stations[0][0],
self.active_stations[1][0]))
if self.playing == -1:
self.stopPlayer()
need_to_calulate_position = True
if self.player.isPlaying():
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Setting playing station at {}'.format(self.playing))
self.setStation(self.playing)
else:
if self.selection == -1:
self.selection = 0
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Setting selection station at {}'.format(self.selection))
self.setStation(self.selection)
if self.selection < 0:
self.selection = 0
self.startPos = 0
self._goto_playing_station(changing_playlist=True)
if logger.isEnabledFor(logging.DEBUG):
logger.debug('self.selection = {0}, self.playing = {1}, self.startPos = {2}'.format(self.selection, self.playing, self.startPos))
self.selections[self.operation_mode] = [self.selection, self.startPos, self.playing, self._cnf.stations]
self.refreshBody()
|
[
" refresh reference ",
" The playlist is empty ",
" Remove selected station ",
" The playlist is not empty ",
" Previous playing station is now invalid\n Need to scan playlist ",
" ok, self.playing found, just find selection ",
" station playing id changed, try previous station ",
" ok, self.playing found, just find selection ",
" self.playing still not found, have to scan playlist ",
" not playing, can i get a selection? ",
" calculate new position ",
" make sure we have a valid selection ",
" make sure playing station is visible "
] |
Please provide a description of the function:def _open_playlist(self):
self._get_active_stations()
self.jumpnr = ''
self._random_requested = False
txt = '''Reading playlists. Please wait...'''
self._show_help(txt, NORMAL_MODE, caption=' ', prompt=' ', is_message=True)
self.selections[self.operation_mode] = [self.selection, self.startPos, self.playing, self._cnf.stations]
self.operation_mode = self.window_mode = PLAYLIST_MODE
self.search = self._playlists_search
self.selection, self.startPos, self.playing, self.stations = self.selections[self.operation_mode]
self.number_of_items, self.playing = self.readPlaylists()
self.stations = self._cnf.playlists
if self.number_of_items == 0:
return
else:
self.refreshBody()
if logger.isEnabledFor(logging.DEBUG):
logger.debug('MODE: NORMAL_MODE -> PLAYLIST_MODE')
return
|
[
" open playlist "
] |
Please provide a description of the function:def _toggle_transparency(self, changed_from_config_window=False, force_value=None):
if self.window_mode == CONFIG_MODE and not changed_from_config_window:
return
self._theme.toggleTransparency(force_value)
self._cnf.use_transparency = self._theme.getTransparency()
if self.operation_mode == THEME_MODE:
self._theme_slector.transparent = self._cnf.use_transparency
self.headWin.refresh()
self.bodyWin.refresh()
self.footerWin.refresh()
if self._config_win:
self._config_win._config_options['use_transparency'][1] = self._cnf.use_transparency
if not changed_from_config_window:
self._config_win._saved_config_options['use_transparency'][1] = self._cnf.use_transparency
self._config_win._old_use_transparency = self._cnf.use_transparency
|
[
" Toggles theme trasparency.\n\n changed_from_config_window is used to inhibit toggling from within\n Config Window when 'T' is pressed.\n\n force_value will set trasparency if True or False,\n or toggle trasparency if None\n "
] |
Please provide a description of the function:def _print_options_help(self):
for i, x in enumerate(self._help_lines[self.selection]):
if i + 2 == self.maxY:
break
self._win.addstr(i+2, self._second_column, ' ' * (self._second_column - 1), curses.color_pair(5))
self._win.addstr(i+2, self._second_column, x.replace('|',''), curses.color_pair(5))
if len(self._help_lines[self.selection]) < self._num_of_help_lines:
for i in range(len(self._help_lines[self.selection]), self._num_of_help_lines):
try:
self._win.addstr(i+2, self._second_column, ' ' * (self._second_column - 1), curses.color_pair(5))
except:
pass
self._num_of_help_lines = len(self._help_lines[self.selection])
|
[
"\n # Uncomment if trouble with help lines\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug('self._num_of_help_lines = {}'.format(self._num_of_help_lines))\n "
] |
Please provide a description of the function:def refresh_win(self, set_encoding=True):
self._fix_geometry()
self.init_window(set_encoding)
self._win.bkgdset(' ', curses.color_pair(3))
self._win.erase()
self._win.box()
self._win.addstr(0,
int((self.maxX - len(self._title)) / 2),
self._title,
curses.color_pair(4))
for i in range(1, self.maxX - 1):
try:
self._win.addch(self.maxY -4, i, '─', curses.color_pair(3))
except:
self._win.addstr(self.maxY -4 , i, u'─'.encode('utf-8'), curses.color_pair(3))
try:
self._win.addch(self.maxY - 4, 0, '├', curses.color_pair(3))
self._win.addch(self.maxY - 4, self.maxX - 1, '┤', curses.color_pair(3))
except:
self._win.addstr(self.maxY - 4, 0, u'├'.encode('utf-8'), curses.color_pair(3))
self._win.addstr(self.maxY - 4, self.maxX - 1, u'┤'.encode('utf-8'), curses.color_pair(3))
self._num_of_rows = int(len(self._encodings) / self._num_of_columns)
self._get_invalids()
self.refresh_selection()
|
[
" set_encoding is False when resizing "
] |
Please provide a description of the function:def _resize(self, init=False):
col, row = self._selection_to_col_row(self.selection)
if not (self.startPos <= row <= self.startPos + self.list_maxY - 1):
while row > self.startPos:
self.startPos += 1
while row < self.startPos + self.list_maxY - 1:
self.startPos -= 1
''' if the selection at the end of the list,
try to scroll down '''
if init and row > self.list_maxY:
new_startPos = self._num_of_rows - self.list_maxY + 1
if row > new_startPos:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('setting startPos at {}'.format(new_startPos))
self.startPos = new_startPos
self.refresh_selection()
|
[] |
Please provide a description of the function:def refresh_win(self, resizing=False):
#self.init_window(set_encoding)
self._win.bkgdset(' ', curses.color_pair(3))
self._win.erase()
self._win.box()
self._win.addstr(0,
int((self.maxX - len(self._title)) / 2),
self._title,
curses.color_pair(4))
self.refresh_selection(resizing)
|
[
" set_encoding is False when resizing "
] |
Please provide a description of the function:def _read_items(self):
self._items = []
self._items = glob.glob(path.join(self._config_path, '*.csv'))
if len(self._items) == 0:
return 0, -1
else:
self._items.sort()
for i, an_item in enumerate(self._items):
self._items[i] = an_item.replace(self._config_path + sep, '').replace('.csv', '')
for i, a_playlist in enumerate(self._items):
if a_playlist ==self._selected_playlist:
self._selected_playlist_id = i
break
self._max_len = len(max(self._items, key=len))
if self._max_len > 44:
self._max_len = 44
self._num_of_items = len(self._items)
|
[
" get already loaded playlist id "
] |
Please provide a description of the function:def write(self, msg, thread_lock=None, help_msg=False):
if self.cursesScreen:
if thread_lock is not None:
thread_lock.acquire()
self.cursesScreen.erase()
try:
self.msg = msg.strip()
self.cursesScreen.addstr(0, 1, self.msg[0: self.width]
.replace("\r", "").replace("\n", ""))
except:
self.msg = msg.encode('utf-8', 'replace').strip()
self.cursesScreen.addstr(0, 1, self.msg[0: self.width]
.replace("\r", "").replace("\n", ""))
self.cursesScreen.refresh()
if thread_lock is not None:
thread_lock.release()
self.last_written_string = msg
if help_msg or self.display_help_message:
self.write_right('Press ? for help', thread_lock)
self.display_help_message = True
|
[
" msg may or may not be encoded "
] |
Please provide a description of the function:def write_right(self, msg, thread_lock=None):
if self.cursesScreen:
if thread_lock is not None:
thread_lock.acquire()
try:
a_msg = msg.strip()
self.cursesScreen.addstr(0, self.width + 5 - len(a_msg) - 1, a_msg.replace("\r", "").replace("\n", ""))
except:
a_msg = msg.encode('utf-8', 'replace').strip()
self.cursesScreen.addstr(0, self.width + 5 - len(a_msg) - 1, a_msg.replace("\r", "").replace("\n", ""))
self.cursesScreen.refresh()
if thread_lock is not None:
thread_lock.release()
|
[
" msg may or may not be encoded "
] |
Please provide a description of the function:def keypress(self, win, char):
if not self._focused:
return 1
if self.log is not None:
self.log('char = {}\n'.format(char))
if char in (curses.KEY_ENTER, ord('\n'), ord('\r')):
if self._has_history:
self._input_history.add_to_history(self._string)
return 0
elif char in (curses.KEY_EXIT, 27):
self._edit_win.nodelay(True)
char = self._edit_win.getch()
self._log_file='/home/spiros/edit.log'
self._log(' *** char = {}\n'.format(char))
self._edit_win.nodelay(False)
if char == -1:
self._string = ''
self._curs_pos = 0
return -1
else:
return 1
elif char in (curses.KEY_RIGHT, curses.ascii.ACK):
self._curs_pos += 1
if len(self._string) < self._curs_pos:
self._curs_pos = len(self._string)
elif char in (curses.KEY_LEFT, ):
self._curs_pos -= 1
if self._curs_pos < 0:
self._curs_pos = 0
elif char in (curses.KEY_HOME, curses.ascii.SOH):
self._curs_pos = 0
elif char in (curses.KEY_END, curses.ascii.ENQ):
self._curs_pos = len(self._string)
elif char in (curses.KEY_DC, curses.ascii.EOT):
if self._curs_pos < len(self._string):
self._string = self._string[:self._curs_pos] + self._string[self._curs_pos+1:]
elif char in (curses.KEY_BACKSPACE, curses.ascii.BS,127):
if self._curs_pos > 0:
self._string = self._string[:self._curs_pos-1] + self._string[self._curs_pos:]
self._curs_pos -= 1
elif char in (curses.KEY_UP, curses.ascii.DLE):
if self._key_up_function_handler is not None:
try:
self._key_up_function_handler()
except:
pass
else:
if self._ungetch_unbound_keys:
curses.ungetch(char)
elif char in (curses.KEY_DOWN, curses.ascii.SO):
if self._key_down_function_handler is not None:
try:
self._key_down_function_handler()
except:
pass
else:
if self._ungetch_unbound_keys:
curses.ungetch(char)
elif char in (curses.KEY_NPAGE, ):
if self._key_pgdown_function_handler is not None:
try:
self._key_pgdown_function_handler()
except:
pass
else:
if self._ungetch_unbound_keys:
curses.ungetch(char)
elif char in (curses.KEY_PPAGE, ):
if self._key_pgup_function_handler is not None:
try:
self._key_pgup_function_handler()
except:
pass
elif char in (9, ):
if self._key_tab_function_handler is not None:
try:
self._key_tab_function_handler()
except:
pass
else:
if self._ungetch_unbound_keys:
curses.ungetch(char)
elif char in (curses.KEY_BTAB, ):
if self._key_stab_function_handler is not None:
try:
self._key_stab_function_handler()
except:
pass
else:
if self._ungetch_unbound_keys:
curses.ungetch(char)
elif char in (curses.ascii.VT, ):
self._string = self._string[:self._curs_pos]
elif 0<= char <=31:
pass
else:
if len(self._string) + 1 == self._max_width:
return 1
if version_info < (3, 0):
if 32 <= char < 127:
# accept only ascii characters
if len(self._string) == self._curs_pos:
self._string += chr(char)
self._curs_pos += 1
else:
self._string = self._string[:self._curs_pos] + chr(char) + self._string[self._curs_pos:]
else:
char = self._get_char(win, char)
if len(self._string) == self._curs_pos:
self._string += char
self._curs_pos += 1
else:
self._string = self._string[:self._curs_pos] + char + self._string[self._curs_pos:]
self.refreshEditWindow()
return 1
|
[
"\n returns:\n 1: get next char\n 0: exit edit mode, string isvalid\n -1: cancel\n ",
" ENTER ",
" ESCAPE ",
" KEY_RIGHT, Alt-F ",
" KEY_LEFT ",
" KEY_HOME, ^A ",
" KEY_END, ^E ",
" DEL key, ^D ",
" KEY_BACKSPACE ",
" KEY_UP, ^N ",
" KEY_DOWN, ^P ",
" PgDn ",
" PgUp ",
" TAB ",
" Shift-TAB ",
" Ctrl-K - delete to end of line "
] |
def _get_char(self, win, char):
    """Read one (possibly multi-byte) UTF-8 character from *win*.

    *char* is the first byte already read; the correct number of UTF-8
    continuation bytes is then pulled from the window and the sequence
    is decoded into a single character string.

    Raises:
        UnicodeError: if an expected continuation byte is out of range.
    """
    def get_check_next_byte():
        # UTF-8 continuation bytes must lie in 0x80-0xBF (128-191).
        char = win.getch()
        if 128 <= char <= 191:
            return char
        else:
            raise UnicodeError
    bytes = []
    if char <= 127:
        # 1 bytes
        bytes.append(char)
    #elif 194 <= char <= 223:
    elif 192 <= char <= 223:
        # 2 bytes
        bytes.append(char)
        bytes.append(get_check_next_byte())
    elif 224 <= char <= 239:
        # 3 bytes
        bytes.append(char)
        bytes.append(get_check_next_byte())
        bytes.append(get_check_next_byte())
    elif 240 <= char <= 244:
        # 4 bytes
        bytes.append(char)
        bytes.append(get_check_next_byte())
        bytes.append(get_check_next_byte())
        bytes.append(get_check_next_byte())
    #print('bytes = {}'.format(bytes))
    # no zero byte allowed
    while 0 in bytes:
        bytes.remove(0)
    if version_info < (3, 0):
        # Python 2: rebuild the raw byte string without decoding.
        out = ''.join([chr(b) for b in bytes])
    else:
        buf = bytearray(bytes)
        # _decode_string presumably decodes with a configurable encoding;
        # falls back conceptually to plain UTF-8 (see commented line).
        out = self._decode_string(buf)
        #out = buf.decode('utf-8')
    return out
|
[
" no zero byte allowed "
] |
def _get_history_next(self):
    """Callback for the "down" key: load the next input-history entry."""
    if not self._has_history:
        return
    entry = self._input_history.return_history(1)
    self.string = entry
    self._curs_pos = len(entry)
|
[
" callback function for key down "
] |
def apply_transformations(collection, transformations, select=None):
    """Apply a sequence of transformations to the variables in *collection*.

    Args:
        collection: The variable collection to transform in place.
        transformations (list): Transformation spec dicts; each must
            contain a 'name' key and may contain 'input' plus any
            transformation-specific keyword arguments.
        select (list): Optional list of variable names to retain after
            all transformations have been applied.

    Returns:
        The (modified) collection.
    """
    for spec in transformations:
        params = dict(spec)
        func = params.pop('name')
        inputs = params.pop('input', None)
        if isinstance(func, string_types):
            # 'and'/'or' collide with Python keywords; the transform
            # module names those with a trailing underscore.
            if func in ('and', 'or'):
                func += '_'
            if not hasattr(transform, func):
                raise ValueError("No transformation '%s' found!" % func)
            func = getattr(transform, func)
            func(collection, inputs, **params)
    if select is not None:
        transform.Select(collection, select)
    return collection
|
[] |
def setup(self, steps=None, drop_na=False, **kwargs):
    ''' Set up the sequence of steps for analysis.

    Args:
        steps (list): Optional list of steps to set up. Each element
            must be either an int giving the index of the step in the
            JSON config block list, or a str giving the (unique) name of
            the step, as specified in the JSON config. Steps that do not
            match either index or name will be skipped.
        drop_na (bool): Boolean indicating whether or not to automatically
            drop events that have a n/a amplitude when reading in data
            from event files.
        kwargs: Additional entity selectors; these override any 'input'
            selectors specified in the model.
    '''
    # In the beginning, there was nothing
    input_nodes = None
    # Use inputs from model, and update with kwargs
    selectors = self.model.get('input', {}).copy()
    selectors.update(kwargs)
    for i, b in enumerate(self.steps):
        # Skip any steps whose names or indexes don't match block list
        if steps is not None and i not in steps and b.name not in steps:
            continue
        # Chain the steps: each consumes the previous step's outputs.
        b.setup(input_nodes, drop_na=drop_na, **selectors)
        input_nodes = b.output_nodes
|
[] |
def setup(self, input_nodes=None, drop_na=False, **kwargs):
    ''' Set up the Step and construct the design matrix.

    Args:
        input_nodes (list): Optional list of Node objects produced by
            the preceding Step in the analysis. If None, uses any inputs
            passed in at Step initialization.
        drop_na (bool): Boolean indicating whether or not to automatically
            drop events that have a n/a amplitude when reading in data
            from event files.
        kwargs: Optional keyword arguments to pass onto load_variables.
    '''
    self.output_nodes = []
    input_nodes = input_nodes or self.input_nodes or []
    # TODO: remove the scan_length argument entirely once we switch tests
    # to use the synthetic dataset with image headers.
    if self.level != 'run':
        kwargs = kwargs.copy()
        kwargs.pop('scan_length', None)
    collections = self.layout.get_collections(self.level, drop_na=drop_na,
                                              **kwargs)
    objects = collections + input_nodes
    objects, kwargs = self._filter_objects(objects, kwargs)
    groups = self._group_objects(objects)
    # Set up and validate variable lists
    model = self.model or {}
    X = model.get('x', [])
    for grp in groups:
        # Split into separate lists of Collections and Nodes
        input_nodes = [o for o in grp if isinstance(o, AnalysisNode)]
        colls = list(set(grp) - set(input_nodes))
        if input_nodes:
            # Fold the previous level's outputs into a single collection.
            node_coll = self._concatenate_input_nodes(input_nodes)
            colls.append(node_coll)
        coll = merge_collections(colls) if len(colls) > 1 else colls[0]
        coll = apply_transformations(coll, self.transformations)
        if X:
            # Restrict to the variables explicitly named in the model.
            transform.Select(coll, X)
        node = AnalysisNode(self.level, coll, self.contrasts, input_nodes,
                            self.auto_contrasts)
        self.output_nodes.append(node)
|
[] |
def get_design_matrix(self, names=None, format='long', mode='both',
                      force=False, sampling_rate='TR', **kwargs):
    """Get design matrix and associated information for matching nodes.

    Args:
        names (list): Optional list of variable names to include in the
            returned design matrix; all variables if None.
        format (str): 'long' or 'wide'. Dense matrices are always wide.
        mode (str): 'sparse', 'dense', or 'both'.
        force (bool): When True, variables not of the representation
            selected by `mode` are converted to it; when False they are
            silently dropped. Ignored when mode='both'.
        sampling_rate ('TR', 'highest', or float): Sampling rate (Hz) at
            which to generate the dense design matrix. Has no effect on
            sparse matrices.
        kwargs: Selectors constraining which nodes are returned, plus
            arguments passed through to each node's to_df() call.

    Returns:
        A list of DesignMatrixInfo namedtuples, one per unit of the
        current analysis level.
    """
    selected, kwargs = self._filter_objects(self.output_nodes, kwargs)

    def _node_dm(node):
        # Delegate to the node, forwarding every design-matrix option.
        return node.get_design_matrix(names, format, mode=mode,
                                      force=force,
                                      sampling_rate=sampling_rate,
                                      **kwargs)

    return [_node_dm(node) for node in selected]
|
[] |
def get_contrasts(self, names=None, variables=None, **kwargs):
    """Return contrast information for the current block.

    Args:
        names (list): Optional list of contrast names to return; all
            contrasts if None.
        variables (list): Optional list of design-matrix column names to
            use when generating the matrix of weights.
        kwargs: Selectors constraining which of the available nodes get
            returned (e.g., subject=['01', '02']).

    Returns:
        A list with one element per unit of the current analysis level;
        each element is a list of ContrastInfo namedtuples.
    """
    selected, kwargs = self._filter_objects(self.output_nodes, kwargs)
    contrasts = []
    for node in selected:
        contrasts.append(node.get_contrasts(names, variables))
    return contrasts
|
[] |
def get_design_matrix(self, names=None, format='long', mode='both',
                      force=False, sampling_rate='TR', **kwargs):
    ''' Get design matrix and associated information.

    Args:
        names (list): Optional list of names of variables to include in
            the returned design matrix. If None, all variables are
            included.
        format (str): Whether to return the design matrix in 'long' or
            'wide' format. Note that dense design matrices are always
            returned in 'wide' format.
        mode (str): Specifies whether to return variables in a sparse
            representation ('sparse'), dense representation ('dense'),
            or both ('both').
        force (bool): When False, variables not of the type indicated by
            `mode` are silently ignored; when True they are converted to
            the desired representation. Ignored entirely if mode='both'.
        sampling_rate ('TR', 'highest' or float): Sampling rate at which
            to generate the dense design matrix. 'TR' uses 1/TR;
            'highest' resamples all variables to the highest sampling
            rate of any variable; a float is interpreted as Hz. Has no
            effect on sparse design matrices.
        kwargs: Optional keyword arguments to pass onto each Variable's
            to_df() call (e.g., sampling_rate, entities, timing, etc.).

    Returns:
        A DesignMatrixInfo namedtuple.

    Raises:
        ValueError: if sampling_rate='TR' but the repetition time is
            unavailable or non-unique across variables.
    '''
    sparse_df, dense_df = None, None
    coll = self.collection
    # Only run-level collections can be represented densely.
    if self.level != 'run' and mode != 'sparse':
        mode = 'sparse'
    include_sparse = include_dense = (force and mode != 'both')
    if mode in ['sparse', 'both']:
        kwargs['sparse'] = True
        sparse_df = coll.to_df(names, format, include_dense=include_dense,
                               **kwargs)
    if mode in ['dense', 'both']:
        # The current implementation of pivoting to wide in
        # BIDSVariableCollection.to_df() breaks if we don't have the
        # temporal columns to index on, so we force their inclusion first
        # and then drop them afterwards.
        kwargs['timing'] = True
        kwargs['sparse'] = False
        if sampling_rate == 'TR':
            trs = {var.run_info[0].tr
                   for var in self.collection.variables.values()}
            if not trs:
                raise ValueError("Repetition time unavailable; specify "
                                 "sampling_rate explicitly")
            elif len(trs) > 1:
                # BUG FIX: the message previously lacked .format(), so the
                # '{!r}' placeholder was printed literally.
                raise ValueError("Non-unique Repetition times found "
                                 "({!r}); specify sampling_rate "
                                 "explicitly".format(trs))
            sampling_rate = 1. / trs.pop()
        elif sampling_rate == 'highest':
            sampling_rate = None
        dense_df = coll.to_df(names, format='wide',
                              include_sparse=include_sparse,
                              sampling_rate=sampling_rate, **kwargs)
        if dense_df is not None:
            # Drop the temporal index columns forced in above.
            dense_df = dense_df.drop(['onset', 'duration'], axis=1)
    return DesignMatrixInfo(sparse_df, dense_df, self.entities)
|
[] |
def get_contrasts(self, names=None, variables=None):
    ''' Return contrast information for the current block.

    Args:
        names (list): Optional list of names of contrasts to return. If
            None (default), all contrasts are returned.
        variables (list): Optional list of strings giving the names of
            design matrix columns to use when generating the matrix of
            weights.

    Notes:
        The 'variables' argument takes precedence over the natural
        process of column selection. I.e., if a variable shows up in a
        contrast, but isn't named in variables, it will *not* be
        included in the returned contrast weight matrix.

    Returns:
        A list of ContrastInfo namedtuples, one per contrast.
    '''
    # Verify that there are no invalid columns in the condition_lists
    all_conds = [c['condition_list'] for c in self._block_contrasts]
    all_conds = set(chain(*all_conds))
    bad_conds = all_conds - set(self.collection.variables.keys())
    if bad_conds:
        raise ValueError("Invalid condition names passed in one or more "
                         " contrast condition lists: %s." % bad_conds)
    # Construct a list of all contrasts, including identity contrasts
    contrasts = list(self._block_contrasts)
    # Check that all contrasts have unique name
    contrast_names = [c['name'] for c in contrasts]
    if len(set(contrast_names)) < len(contrast_names):
        raise ValueError("One or more contrasts have the same name")
    contrast_names = list(set(contrast_names))
    if self.auto_contrasts:
        # Add an identity contrast for each requested column that does
        # not already have an explicitly defined contrast.
        for col_name in self.auto_contrasts:
            if (col_name in self.collection.variables.keys()
                    and col_name not in contrast_names):
                contrasts.append({
                    'name': col_name,
                    'condition_list': [col_name],
                    'weights': [1],
                    'type': 't'
                })
    # Filter on desired contrast names if passed
    if names is not None:
        contrasts = [c for c in contrasts if c['name'] in names]

    def setup_contrast(c):
        # Build the weight matrix (rows = contrast rows, columns =
        # conditions) for a single contrast specification dict.
        weights = np.atleast_2d(c['weights'])
        weights = pd.DataFrame(weights, columns=c['condition_list'])
        # If variables were explicitly passed, use them as the columns
        if variables is not None:
            var_df = pd.DataFrame(columns=variables)
            weights = pd.concat([weights, var_df],
                                sort=True)[variables].fillna(0)
        # Default test: t for a single row of weights, F otherwise.
        test_type = c.get('type', ('t' if len(weights) == 1 else 'F'))
        return ContrastInfo(c['name'], weights, test_type, self.entities)

    self._contrasts = [setup_contrast(c) for c in contrasts]
    return self._contrasts
|
[] |
def remove_duplicates(seq):
    """Return the unique elements of *seq*, preserving first-seen order.

    From https://stackoverflow.com/a/480227/2589328
    """
    seen = set()
    unique = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
|
[
"\n Return unique elements from list while preserving order.\n From https://stackoverflow.com/a/480227/2589328\n "
] |
def list_to_str(lst):
    """Turn a list into a comma- and/or and-separated string.

    Parameters
    ----------
    lst : :obj:`list`
        A list of strings to join into a single string.

    Returns
    -------
    :obj:`str`
        The elements of ``lst`` joined with commas and/or 'and'.

    Raises
    ------
    ValueError
        If ``lst`` is empty.
    """
    if not lst:
        raise ValueError('List of length 0 provided.')
    if len(lst) == 1:
        return lst[0]
    if len(lst) == 2:
        return '{0} and {1}'.format(lst[0], lst[1])
    # Oxford-comma join for three or more elements.
    return '{0}, and {1}'.format(', '.join(lst[:-1]), lst[-1])
|
[
"\n Turn a list into a comma- and/or and-separated string.\n\n Parameters\n ----------\n lst : :obj:`list`\n A list of strings to join into a single string.\n\n Returns\n -------\n str_ : :obj:`str`\n A string with commas and/or ands separating th elements from ``lst``.\n\n "
] |
def get_slice_info(slice_times):
    """Extract slice order from slice timing info.

    TODO: Be more specific with slice orders. Currently anything where
    there's some kind of skipping is interpreted as interleaved of some
    kind.

    Parameters
    ----------
    slice_times : array-like
        A list of slice times.

    Returns
    -------
    slice_order_name : :obj:`str`
        The name of the slice order sequence.

    Raises
    ------
    Exception
        If the slice order cannot be classified.
    """
    # Slice order
    slice_times = remove_duplicates(slice_times)
    slice_order = sorted(range(len(slice_times)), key=lambda k: slice_times[k])
    # BUG FIX: in Python 3 a list never compares equal to a `range` or to a
    # `reversed` iterator, so the original comparisons were always False and
    # sequential acquisitions were misclassified as interleaved. Compare
    # against materialized lists instead.
    if slice_order == list(range(len(slice_order))):
        slice_order_name = 'sequential ascending'
    elif slice_order == list(reversed(range(len(slice_order)))):
        slice_order_name = 'sequential descending'
    elif slice_order[0] < slice_order[1]:
        # We're allowing some wiggle room on interleaved.
        slice_order_name = 'interleaved ascending'
    elif slice_order[0] > slice_order[1]:
        slice_order_name = 'interleaved descending'
    else:
        slice_order = [str(s) for s in slice_order]
        raise Exception('Unknown slice order: [{0}]'.format(', '.join(slice_order)))
    return slice_order_name
|
[
"\n Extract slice order from slice timing info.\n\n TODO: Be more specific with slice orders.\n Currently anything where there's some kind of skipping is interpreted as\n interleaved of some kind.\n\n Parameters\n ----------\n slice_times : array-like\n A list of slice times in seconds or milliseconds or whatever.\n\n Returns\n -------\n slice_order_name : :obj:`str`\n The name of the slice order sequence.\n "
] |
def get_seqstr(config, metadata):
    """Extract and reformat imaging sequence(s) and variant(s) into pretty
    strings.

    Parameters
    ----------
    config : :obj:`dict`
        A dictionary with relevant information regarding sequences and
        sequence variants (keys 'seq' and 'seqvar' mapping abbreviations
        to full names).
    metadata : :obj:`dict`
        The metadata for the scan; 'ScanningSequence' and
        'SequenceVariant' are read as underscore-separated abbreviations.

    Returns
    -------
    seqs : :obj:`str`
        Sequence names, e.g. "gradient recalled (GR)".
    variants : :obj:`str`
        Sequence variant names.
    """
    seq_abbrs = metadata.get('ScanningSequence', '').split('_')
    seqs = [config['seq'].get(seq, seq) for seq in seq_abbrs]
    variants = [config['seqvar'].get(var, var) for var in
                metadata.get('SequenceVariant', '').split('_')]
    seqs = list_to_str(seqs)
    if seq_abbrs[0]:
        # BUG FIX: use a literal '/' so the abbreviation list renders the
        # same on every platform (os.path.sep would yield '\' on Windows,
        # producing e.g. "(GR\IR)" in a human-readable methods string).
        seqs += ' ({0})'.format('/'.join(seq_abbrs))
    variants = list_to_str(variants)
    return seqs, variants
|
[
"\n Extract and reformat imaging sequence(s) and variant(s) into pretty\n strings.\n\n Parameters\n ----------\n config : :obj:`dict`\n A dictionary with relevant information regarding sequences, sequence\n variants, phase encoding directions, and task names.\n metadata : :obj:`dict`\n The metadata for the scan.\n\n Returns\n -------\n seqs : :obj:`str`\n Sequence names.\n variants : :obj:`str`\n Sequence variant names.\n "
] |
def get_sizestr(img):
    """Extract and reformat voxel size, matrix size, field of view, and
    number of slices into pretty strings.

    Parameters
    ----------
    img : :obj:`nibabel.Nifti1Image`
        Image from scan from which to derive parameters.

    Returns
    -------
    n_slices : :obj:`int`
        Number of slices.
    voxel_size : :obj:`str`
        Voxel size string (e.g., '2x2x2').
    matrix_size : :obj:`str`
        Matrix size string (e.g., '128x128').
    fov : :obj:`str`
        Field of view string (e.g., '256x256').
    """
    import numpy as np
    n_x, n_y, n_slices = img.shape[:3]
    voxel_dims = np.array(img.header.get_zooms()[:3])
    matrix_size = '{0}x{1}'.format(num_to_str(n_x), num_to_str(n_y))
    voxel_size = 'x'.join(num_to_str(dim) for dim in voxel_dims)
    # In-plane field of view = matrix size (voxels) * voxel size (mm).
    fov_dims = np.array([n_x, n_y]) * voxel_dims[:2]
    fov = 'x'.join(num_to_str(dim) for dim in fov_dims)
    return n_slices, voxel_size, matrix_size, fov
|
[
"\n Extract and reformat voxel size, matrix size, field of view, and number of\n slices into pretty strings.\n\n Parameters\n ----------\n img : :obj:`nibabel.Nifti1Image`\n Image from scan from which to derive parameters.\n\n Returns\n -------\n n_slices : :obj:`int`\n Number of slices.\n voxel_size : :obj:`str`\n Voxel size string (e.g., '2x2x2')\n matrix_size : :obj:`str`\n Matrix size string (e.g., '128x128')\n fov : :obj:`str`\n Field of view string (e.g., '256x256')\n "
] |
def parse_file_entities(filename, entities=None, config=None,
                        include_unmatched=False):
    """Parse the passed filename for entity/value pairs.

    Args:
        filename (str): The filename to parse for entity values.
        entities (list): Optional list of Entity instances to use in
            extraction. If passed, the config argument is ignored.
        config (str, Config, list): One or more Config objects or names
            of configurations to use in matching (e.g., 'bids' or
            'derivatives'). If None, all available configs are used.
        include_unmatched (bool): If True, unmatched entities are
            included in the returned dict with values set to None; if
            False (default), they are omitted.

    Returns:
        A dict mapping Entity names to the values extracted from the
        filename.
    """
    if entities is None:
        # No explicit entities: derive them from the requested configs.
        if config is None:
            config = ['bids', 'derivatives']
        configs = [c if isinstance(c, Config) else Config.load(c)
                   for c in listify(config)]
        # Consolidate entities from all Configs into a single dict;
        # later configs override earlier ones on name clashes.
        merged = {}
        for cfg in configs:
            merged.update(cfg.entities)
        entities = merged.values()
    bf = BIDSFile(filename)
    results = {}
    for ent in entities:
        match = ent.match_file(bf)
        if match is not None or include_unmatched:
            results[ent.name] = match
    return results
|
[
" Parse the passed filename for entity/value pairs.\n\n Args:\n filename (str): The filename to parse for entity values\n entities (list): An optional list of Entity instances to use in\n extraction. If passed, the config argument is ignored.\n config (str, Config, list): One or more Config objects or names of\n configurations to use in matching. Each element must be a Config\n object, or a valid Config name (e.g., 'bids' or 'derivatives').\n If None, all available configs are used.\n include_unmatched (bool): If True, unmatched entities are included\n in the returned dict, with values set to None. If False\n (default), unmatched entities are ignored.\n\n Returns: A dict, where keys are Entity names and values are the\n values extracted from the filename.\n "
] |
def add_config_paths(**kwargs):
    """Add to the pool of available configuration files for BIDSLayout.

    Args:
        kwargs: mapping of configuration name -> path to the
            corresponding .json file.

    Raises:
        ValueError: if a path does not exist or a name is already
            registered.

    Example:
        > add_config_paths(my_config='/path/to/config')
        > layout = BIDSLayout('/path/to/bids', config=['bids', 'my_config'])
    """
    existing = cf.get_option('config_paths')
    for name, path in kwargs.items():
        if not os.path.exists(path):
            raise ValueError(
                'Configuration file "{}" does not exist'.format(name))
        if name in existing:
            raise ValueError('Configuration {!r} already exists'.format(name))
    # Merge, letting already-registered paths take precedence.
    merged = dict(kwargs)
    merged.update(**existing)
    cf.set_option('config_paths', merged)
|
[
" Add to the pool of available configuration files for BIDSLayout.\n\n Args:\n kwargs: dictionary specifying where to find additional config files.\n Keys are names, values are paths to the corresponding .json file.\n\n Example:\n > add_config_paths(my_config='/path/to/config')\n > layout = BIDSLayout('/path/to/bids', config=['bids', 'my_config'])\n "
] |
def parse_file_entities(self, filename, scope='all', entities=None,
                        config=None, include_unmatched=False):
    """Parse the passed filename for entity/value pairs.

    Args:
        filename (str): The filename to parse for entity values.
        scope (str, list): Which BIDSLayouts' entities to extract; see
            the BIDSLayout docstring for valid values. Defaults to 'all'.
        entities (list): Optional list of Entity instances to use. If
            passed, scope and config are ignored.
        config (str, Config, list): One or more Config objects or paths
            to JSON config files defining the Entities to use. If
            passed, scope is ignored.
        include_unmatched (bool): If True, unmatched entities appear in
            the returned dict with values set to None; otherwise they
            are omitted.

    Returns:
        A dict mapping Entity names to extracted values.
    """
    if entities is None and config is None:
        # Neither spec given: pool the Configs from every layout in scope.
        layouts = self._get_layouts_in_scope(scope)
        pooled = chain(*[list(l.config.values()) for l in layouts])
        config = list(set(pooled))
    # Delegate to the module-level implementation.
    return parse_file_entities(filename, entities, config,
                               include_unmatched)
|
[] |
def add_derivatives(self, path, **kwargs):
    ''' Add BIDS-Derivatives datasets to tracking.

    Args:
        path (str, list): One or more paths to BIDS-Derivatives datasets.
            Each path can point to either a derivatives/ directory
            containing one or more pipeline directories, or to a single
            pipeline directory (e.g., derivatives/fmriprep).
        kwargs (dict): Optional keyword arguments to pass on to
            BIDSLayout() when initializing each of the derivative
            datasets.

    Note: Every derivatives directory intended for indexing MUST contain
        a valid dataset_description.json file. See the BIDS-Derivatives
        specification for details.

    Raises:
        ValueError: if a dataset lacks PipelineDescription.Name, or if a
            pipeline name is added twice.
    '''
    paths = listify(path)
    deriv_dirs = []

    # Collect all paths that contain a dataset_description.json
    def check_for_description(dir):
        dd = os.path.join(dir, 'dataset_description.json')
        return os.path.exists(dd)

    for p in paths:
        p = os.path.abspath(p)
        if os.path.exists(p):
            if check_for_description(p):
                # The path itself is a pipeline directory.
                deriv_dirs.append(p)
            else:
                # Otherwise, treat immediate subdirectories as candidate
                # pipeline directories.
                subdirs = [d for d in os.listdir(p)
                           if os.path.isdir(os.path.join(p, d))]
                for sd in subdirs:
                    sd = os.path.join(p, sd)
                    if check_for_description(sd):
                        deriv_dirs.append(sd)
    if not deriv_dirs:
        warnings.warn("Derivative indexing was enabled, but no valid "
                      "derivatives datasets were found in any of the "
                      "provided or default locations. Please make sure "
                      "all derivatives datasets you intend to index "
                      "contain a 'dataset_description.json' file, as "
                      "described in the BIDS-derivatives specification.")
    for deriv in deriv_dirs:
        dd = os.path.join(deriv, 'dataset_description.json')
        with open(dd, 'r', encoding='utf-8') as ddfd:
            description = json.load(ddfd)
        pipeline_name = description.get(
            'PipelineDescription', {}).get('Name')
        if pipeline_name is None:
            raise ValueError("Every valid BIDS-derivatives dataset must "
                             "have a PipelineDescription.Name field set "
                             "inside dataset_description.json.")
        if pipeline_name in self.derivatives:
            # BUG FIX: the '%s' placeholder was never filled in; supply
            # the offending pipeline name.
            raise ValueError("Pipeline name '%s' has already been added "
                             "to this BIDSLayout. Every added pipeline "
                             "must have a unique name!" % pipeline_name)
        # Default config and sources values
        kwargs['config'] = kwargs.get('config') or ['bids', 'derivatives']
        kwargs['sources'] = kwargs.get('sources') or self
        self.derivatives[pipeline_name] = BIDSLayout(deriv, **kwargs)
    # Consolidate all entities post-indexing. Note: no conflicts occur b/c
    # multiple entries with the same name all point to the same instance.
    for deriv in self.derivatives.values():
        self.entities.update(deriv.entities)
|
[] |
def get(self, return_type='object', target=None, extensions=None,
        scope='all', regex_search=False, defined_fields=None,
        absolute_paths=None,
        **kwargs):
    """Retrieve files and/or metadata from the current Layout.

    Args:
        return_type (str): Type of result to return. Valid values:
            'object' (default): return a list of matching BIDSFile
                objects.
            'file': return a list of matching filenames.
            'dir': return a list of directories.
            'id': return a list of unique IDs. Must be used together
                with a valid target.
        target (str): Optional name of the target entity to get results
            for (only used if return_type is 'dir' or 'id').
        extensions (str, list): One or more file extensions to filter
            on. BIDSFiles with any other extensions will be excluded.
        scope (str, list): Scope of the search space: 'all' (default),
            'derivatives', 'raw', or the name of a BIDS-Derivatives
            pipeline.
        regex_search (bool): Whether to use regex search (True) or exact
            matching (False) when comparing the query string to each
            entity.
        defined_fields (list): Optional list of names of metadata fields
            that must be defined in JSON sidecars for a file to match,
            regardless of their value.
        absolute_paths (bool): Optionally override the instance-wide
            option to report absolute or relative (to the dataset root)
            paths.
        kwargs (dict): Any optional key/values to filter the entities
            on. Keys are entity names, values are regexes to filter on.

    Returns:
        A list of BIDSFiles (default) or strings (see return_type).
    """
    # Warn users still expecting 0.6 behavior
    if 'type' in kwargs:
        raise ValueError("As of pybids 0.7.0, the 'type' argument has been"
                         " replaced with 'suffix'.")
    layouts = self._get_layouts_in_scope(scope)
    # Create concatenated file, node, and entity lists
    files, entities, nodes = {}, {}, []
    for l in layouts:
        files.update(l.files)
        entities.update(l.entities)
        nodes.extend(l.nodes)
    # Separate entity kwargs from metadata kwargs
    ent_kwargs, md_kwargs = {}, {}
    for k, v in kwargs.items():
        if k in entities:
            ent_kwargs[k] = v
        else:
            md_kwargs[k] = v
    # Provide some suggestions if target is specified and invalid.
    if target is not None and target not in entities:
        import difflib
        potential = list(entities.keys())
        suggestions = difflib.get_close_matches(target, potential)
        if suggestions:
            message = "Did you mean one of: {}?".format(suggestions)
        else:
            message = "Valid targets are: {}".format(potential)
        raise ValueError(("Unknown target '{}'. " + message)
                         .format(target))
    results = []
    # Search on entities
    filters = ent_kwargs.copy()
    for f in files.values():
        if f._matches(filters, extensions, regex_search):
            results.append(f)
    # Search on metadata
    if return_type not in {'dir', 'id'}:
        if md_kwargs:
            results = [f.path for f in results]
            results = self.metadata_index.search(results, defined_fields,
                                                 **md_kwargs)
            results = [files[f] for f in results]
    # Convert to relative paths if needed
    if absolute_paths is None:  # can be overloaded as option to .get
        absolute_paths = self.absolute_paths
    if not absolute_paths:
        for i, f in enumerate(results):
            f = copy.copy(f)
            f.path = os.path.relpath(f.path, self.root)
            results[i] = f
    if return_type == 'file':
        results = natural_sort([f.path for f in results])
    elif return_type in ['id', 'dir']:
        if target is None:
            raise ValueError('If return_type is "id" or "dir", a valid '
                             'target entity must also be specified.')
        results = [x for x in results if target in x.entities]
        if return_type == 'id':
            results = list(set([x.entities[target] for x in results]))
            results = natural_sort(results)
        elif return_type == 'dir':
            template = entities[target].directory
            if template is None:
                raise ValueError('Return type set to directory, but no '
                                 'directory template is defined for the '
                                 'target entity (\"%s\").' % target)
            # Construct regex search pattern from target directory template
            template = self.root + template
            to_rep = re.findall(r'\{(.*?)\}', template)
            for ent in to_rep:
                patt = entities[ent].pattern
                template = template.replace('{%s}' % ent, patt)
            template += r'[^\%s]*$' % os.path.sep
            matches = [
                f.dirname if absolute_paths
                else os.path.relpath(f.dirname, self.root)
                for f in results
                if re.search(template, f.dirname)
            ]
            results = natural_sort(list(set(matches)))
        else:
            # NOTE: unreachable given the enclosing ['id', 'dir'] check;
            # kept defensively. BUG FIX: the message previously listed
            # 'tuple', which has never been a valid return_type.
            raise ValueError("Invalid return_type specified (must be one "
                             "of 'object', 'file', 'id', or 'dir'.")
    else:
        # return_type == 'object': sort the BIDSFile objects by path.
        results = natural_sort(results, 'path')
    return results
|
[
"\n Retrieve files and/or metadata from the current Layout.\n\n Args:\n return_type (str): Type of result to return. Valid values:\n 'object' (default): return a list of matching BIDSFile objects.\n 'file': return a list of matching filenames.\n 'dir': return a list of directories.\n 'id': return a list of unique IDs. Must be used together with\n a valid target.\n target (str): Optional name of the target entity to get results for\n (only used if return_type is 'dir' or 'id').\n extensions (str, list): One or more file extensions to filter on.\n BIDSFiles with any other extensions will be excluded.\n scope (str, list): Scope of the search space. If passed, only\n nodes/directories that match the specified scope will be\n searched. Possible values include:\n 'all' (default): search all available directories.\n 'derivatives': search all derivatives directories\n 'raw': search only BIDS-Raw directories\n <PipelineName>: the name of a BIDS-Derivatives pipeline\n regex_search (bool or None): Whether to require exact matching\n (False) or regex search (True) when comparing the query string\n to each entity.\n defined_fields (list): Optional list of names of metadata fields\n that must be defined in JSON sidecars in order to consider the\n file a match, but which don't need to match any particular\n value.\n absolute_paths (bool): Optionally override the instance-wide option\n to report either absolute or relative (to the top of the\n dataset) paths. If None, will fall back on the value specified\n at BIDSLayout initialization.\n kwargs (dict): Any optional key/values to filter the entities on.\n Keys are entity names, values are regexes to filter on. For\n example, passing filter={'subject': 'sub-[12]'} would return\n only files that match the first two subjects.\n\n Returns:\n A list of BIDSFiles (default) or strings (see return_type).\n\n Notes:\n As of pybids 0.7.0 some keywords have been changed. 
Namely: 'type'\n becomes 'suffix', 'modality' becomes 'datatype', 'acq' becomes \n 'acquisition' and 'mod' becomes 'modality'. Using the wrong version \n could result in get() silently returning wrong or no results. See \n the changelog for more details.\n "
] |
def get_file(self, filename, scope='all'):
    """Return the BIDSFile object with the specified path, or None.

    Args:
        filename (str): The path of the file to retrieve. Must be either
            an absolute path, or relative to the root of this BIDSLayout.
        scope (str, list): Scope of the search space; only BIDSLayouts
            matching the specified scope are searched. See the
            BIDSLayout docstring for valid values.

    Returns:
        A BIDSFile, or None if no match was found.
    """
    # Normalize to an absolute path anchored at the layout root.
    abspath = os.path.abspath(os.path.join(self.root, filename))
    for layout in self._get_layouts_in_scope(scope):
        match = layout.files.get(abspath)
        if match is not None:
            return match
    return None
|
[] |
def get_collections(self, level, types=None, variables=None, merge=False,
                    sampling_rate=None, skip_empty=False, **kwargs):
    """Return one or more variable Collections in the BIDS project.

    Args:
        level (str): The level of analysis to return variables for. Must
            be one of 'run', 'session', 'subject', or 'dataset'.
        types (str, list): Types of variables to retrieve (e.g.,
            'events', 'physio', 'stim', 'scans', 'participants',
            'sessions', 'regressors').
        variables (list): Optional list of variable names to return; all
            available variables if None.
        merge (bool): If True, merge variables across all observations
            of the current level into a single collection; if False,
            return one collection per observation.
        sampling_rate (int, str): If level='run', the sampling rate to
            pass onto the returned BIDSRunVariableCollection.
        skip_empty (bool): Whether to skip empty Variables (no rows left
            after filtering operations such as dropping NaNs).
        kwargs: Optional additional arguments to pass onto
            load_variables.
    """
    # Imported lazily to avoid a circular import at module load time.
    from bids.variables import load_variables
    var_index = load_variables(self, types=types, levels=level,
                               skip_empty=skip_empty, **kwargs)
    return var_index.get_collections(level, variables, merge,
                                    sampling_rate=sampling_rate)
|
[
"Return one or more variable Collections in the BIDS project.\n\n Args:\n level (str): The level of analysis to return variables for. Must be\n one of 'run', 'session', 'subject', or 'dataset'.\n types (str, list): Types of variables to retrieve. All valid values\n reflect the filename stipulated in the BIDS spec for each kind of\n variable. Valid values include: 'events', 'physio', 'stim',\n 'scans', 'participants', 'sessions', and 'regressors'.\n variables (list): Optional list of variables names to return. If\n None, all available variables are returned.\n merge (bool): If True, variables are merged across all observations\n of the current level. E.g., if level='subject', variables from\n all subjects will be merged into a single collection. If False,\n each observation is handled separately, and the result is\n returned as a list.\n sampling_rate (int, str): If level='run', the sampling rate to\n pass onto the returned BIDSRunVariableCollection.\n skip_empty (bool): Whether or not to skip empty Variables (i.e.,\n where there are no rows/records in a file after applying any\n filtering operations like dropping NaNs).\n kwargs: Optional additional arguments to pass onto load_variables.\n "
] |
def get_metadata(self, path, include_entities=False, **kwargs):
    """Return metadata found in JSON sidecars for the specified file.

    Args:
        path (str): Path to the file to get metadata for.
        include_entities (bool): If True, all available entities extracted
            from the filename (rather than JSON sidecars) are included in
            the returned metadata dictionary.
        kwargs (dict): Optional keyword arguments to pass onto
            get_nearest().

    Returns: A dictionary of key/value pairs extracted from all of the
        target file's associated JSON sidecars.

    Notes:
        In cases where the same key is found in multiple sidecar files,
        the values in files closer to the input filename take precedence,
        per the inheritance rules in the BIDS specification.
    """
    f = self.get_file(path)
    # For querying efficiency, store metadata in the MetadataIndex cache.
    # Index under the resolved absolute path (f.path): the raw `path`
    # argument may be relative and would miss the cache otherwise.
    self.metadata_index.index_file(f.path)

    if include_entities:
        # Copy the entity dict so that merging metadata into the result
        # below cannot mutate the BIDSFile's own entities in place.
        results = dict(f.entities)
    else:
        results = {}

    # Look the file up under the same key it was indexed with (f.path);
    # using the raw `path` would silently return an empty dict for
    # relative inputs.
    results.update(self.metadata_index.file_index[f.path])
    return results
|
[
"Return metadata found in JSON sidecars for the specified file.\n\n Args:\n path (str): Path to the file to get metadata for.\n include_entities (bool): If True, all available entities extracted\n from the filename (rather than JSON sidecars) are included in\n the returned metadata dictionary.\n kwargs (dict): Optional keyword arguments to pass onto\n get_nearest().\n\n Returns: A dictionary of key/value pairs extracted from all of the\n target file's associated JSON sidecars.\n\n Notes:\n A dictionary containing metadata extracted from all matching .json\n files is returned. In cases where the same key is found in multiple\n files, the values in files closer to the input filename will take\n precedence, per the inheritance rules in the BIDS specification.\n\n "
] |
def get_bval(self, path, **kwargs):
    """ Get bval file for passed path. """
    # Ask for all nearby .bval candidates for this DWI image, then return
    # the nearest (first) match.
    matches = self.get_nearest(path, extensions='bval', suffix='dwi',
                               all_=True, **kwargs)
    return listify(matches)[0]
|
[
" Get bval file for passed path. "
] |
def get_fieldmap(self, path, return_list=False):
    """ Get fieldmap(s) for specified path. """
    fieldmaps = self._get_fieldmaps(path)

    # Caller explicitly asked for the raw list -- hand it back unchanged.
    if return_list:
        return fieldmaps

    # Otherwise a single result is expected; disambiguate with guards.
    if not fieldmaps:
        return None
    if len(fieldmaps) == 1:
        return fieldmaps[0]
    raise ValueError("More than one fieldmap found, but the "
                     "'return_list' argument was set to False. "
                     "Either ensure that there is only one "
                     "fieldmap for this image, or set the "
                     "'return_list' argument to True and handle "
                     "the result as a list.")
|
[
" Get fieldmap(s) for specified path. "
] |
def get_tr(self, derivatives=False, **selectors):
    """ Returns the scanning repetition time (TR) for one or more runs.

    Args:
        derivatives (bool): If True, also checks derivatives images.
        selectors: Optional keywords used to constrain the selected runs.
            Can be any arguments valid for a .get call (e.g., BIDS
            entities or JSON sidecar keys).

    Returns: A single float.

    Notes: Raises an exception if more than one unique TR is found.
    """
    # Constrain search to functional images
    selectors.update(suffix='bold', datatype='func')
    scope = None if derivatives else 'raw'
    images = self.get(extensions=['.nii', '.nii.gz'], scope=scope,
                      **selectors)
    if not images:
        raise ValueError("No functional images that match criteria found.")

    # Collect every run's TR, rounded to damp floating-point noise in the
    # JSON metadata, and require that exactly one distinct value remains.
    all_trs = {
        round(float(self.get_metadata(img.path, suffix='bold',
                                      full_search=True)['RepetitionTime']),
              5)
        for img in images
    }
    if len(all_trs) > 1:
        raise ValueError("Unique TR cannot be found given selectors {!r}"
                         .format(selectors))
    return all_trs.pop()
|
[
" Returns the scanning repetition time (TR) for one or more runs.\n\n Args:\n derivatives (bool): If True, also checks derivatives images.\n selectors: Optional keywords used to constrain the selected runs.\n Can be any arguments valid for a .get call (e.g., BIDS entities\n or JSON sidecar keys).\n \n Returns: A single float.\n\n Notes: Raises an exception if more than one unique TR is found.\n "
] |
def build_path(self, source, path_patterns=None, strict=False, scope='all'):
    """Construct a target filename for a file or dictionary of entities.

    Args:
        source (str, BIDSFile, dict): The source data used to construct
            the new file path. One of: a BIDSFile object; a string path of
            a BIDSFile contained within the current Layout; or a dict
            mapping entity names to values.
        path_patterns (list): Optional path patterns to use. If None, the
            Layout-defined patterns are used. Entities are written as
            '{name}'; optional portions in square brackets; required
            values inside angle brackets; defaults after a pipe (e.g.
            '{type<image>|bold}' matches only when 'type' is 'image',
            otherwise defaults to 'bold').
            Example: 'sub-{subject}/[var-{name}/]{id}.csv'
            Result: 'sub-01/var-SES/1045.csv'
        strict (bool): If True, all entities must be matched inside a
            pattern for it to be valid. If False, extra entities are
            ignored so long as all mandatory entities are found.
        scope (str, list): Which BIDSLayouts' path patterns to use. See
            BIDSLayout docstring for valid values. When several values are
            given, earlier layouts' patterns take precedence.
    """
    # 'is_file' is a crude check for Path objects
    if isinstance(source, six.string_types) or hasattr(source, 'is_file'):
        source = str(source)
        if source not in self.files:
            source = os.path.join(self.root, source)
        source = self.get_file(source)

    if isinstance(source, BIDSFile):
        source = source.entities

    if path_patterns is None:
        # Gather default patterns from every in-scope layout's configs,
        # de-duplicating configs while preserving precedence order.
        path_patterns = []
        seen_configs = set()
        for layout in self._get_layouts_in_scope(scope):
            for config in layout.config.values():
                if config not in seen_configs:
                    path_patterns.extend(config.default_path_patterns)
                    seen_configs.add(config)

    # Delegate to the module-level build_path() utility.
    return build_path(source, path_patterns, strict)
|
[] |
def copy_files(self, files=None, path_patterns=None, symbolic_links=True,
               root=None, conflicts='fail', **kwargs):
    """Copy one or more BIDSFiles to new locations.

    Target locations are defined by each BIDSFile's entities and the
    specified path_patterns.

    Args:
        files (list): Optional list of BIDSFile objects to write out. If
            none provided, use files from running a get() query using
            remaining **kwargs.
        path_patterns (str, list): Write patterns to pass to each file's
            write_file method.
        symbolic_links (bool): Whether to copy each file as a symbolic
            link or a deep copy.
        root (str): Optional root directory that all patterns are relative
            to. Defaults to the layout root.
        conflicts (str): Defines the desired action when the output path
            already exists. Must be one of:
                'fail': raises an exception
                'skip': does nothing
                'overwrite': overwrites the existing file
                'append': adds a suffix to each file copy, starting with 1
        kwargs (kwargs): Optional keyword arguments to pass into a get()
            query.
    """
    _files = self.get(return_type='objects', **kwargs)
    if files:
        _files = list(set(files).intersection(_files))

    # Honor an explicitly passed root; previously the `root` argument was
    # accepted but silently ignored in favor of self.root.
    root = self.root if root is None else root
    for f in _files:
        f.copy(path_patterns, symbolic_link=symbolic_links,
               root=root, conflicts=conflicts)
|
[
"\n Copies one or more BIDSFiles to new locations defined by each\n BIDSFile's entities and the specified path_patterns.\n\n Args:\n files (list): Optional list of BIDSFile objects to write out. If\n none provided, use files from running a get() query using\n remaining **kwargs.\n path_patterns (str, list): Write patterns to pass to each file's\n write_file method.\n symbolic_links (bool): Whether to copy each file as a symbolic link\n or a deep copy.\n root (str): Optional root directory that all patterns are relative\n to. Defaults to current working directory.\n conflicts (str): Defines the desired action when the output path\n already exists. Must be one of:\n 'fail': raises an exception\n 'skip' does nothing\n 'overwrite': overwrites the existing file\n 'append': adds a suffix to each file copy, starting with 1\n kwargs (kwargs): Optional key word arguments to pass into a get()\n query.\n "
] |
def write_contents_to_file(self, entities, path_patterns=None,
                           contents=None, link_to=None,
                           content_mode='text', conflicts='fail',
                           strict=False):
    """Write arbitrary data to a file defined by entities and patterns.

    Args:
        entities (dict): A dictionary of entities, with Entity names in
            keys and values for the desired file in values.
        path_patterns (list): Optional path patterns to use when building
            the filename. If None, the Layout-defined patterns are used.
        contents (object): Contents to write to the generated file path.
            Can be any object serializable as text or binary data (as
            defined in the content_mode argument).
        link_to (str): Optional path to create a symbolic link to. Takes
            priority over the contents argument.
        content_mode (str): 'text' or 'binary'; how contents are written.
        conflicts (str): Action when the output path already exists; one
            of 'fail', 'skip', 'overwrite', or 'append'.
        strict (bool): If True, all entities must be matched inside a
            pattern in order to be a valid match; if False, extra entities
            are tolerated.
    """
    target = self.build_path(entities, path_patterns, strict)
    if target is None:
        raise ValueError("Cannot construct any valid filename for "
                         "the passed entities given available path "
                         "patterns.")
    # Delegate the actual write to the module-level utility.
    write_contents_to_file(target, contents=contents, link_to=link_to,
                           content_mode=content_mode, conflicts=conflicts,
                           root=self.root)
|
[
"\n Write arbitrary data to a file defined by the passed entities and\n path patterns.\n\n Args:\n entities (dict): A dictionary of entities, with Entity names in\n keys and values for the desired file in values.\n path_patterns (list): Optional path patterns to use when building\n the filename. If None, the Layout-defined patterns will be\n used.\n contents (object): Contents to write to the generate file path.\n Can be any object serializable as text or binary data (as\n defined in the content_mode argument).\n link_to (str): Optional path with which to create a symbolic link\n to. Used as an alternative to and takes priority over the\n contents argument.\n conflicts (str): Defines the desired action when the output path\n already exists. Must be one of:\n 'fail': raises an exception\n 'skip' does nothing\n 'overwrite': overwrites the existing file\n 'append': adds a suffix to each file copy, starting with 1\n strict (bool): If True, all entities must be matched inside a\n pattern in order to be a valid match. If False, extra entities\n "
] |
def index_file(self, f, overwrite=False):
    """Index metadata for the specified file.

    Args:
        f (BIDSFile, str): A BIDSFile or path to an indexed file.
        overwrite (bool): If True, forces reindexing of the file even if
            an entry already exists.
    """
    # Accept either a path or a BIDSFile; resolve paths via the layout.
    if isinstance(f, six.string_types):
        f = self.layout.get_file(f)

    if f.path in self.file_index and not overwrite:
        return

    # Files without a suffix entity carry no indexable metadata -- skip.
    if 'suffix' not in f.entities:
        return

    md = self._get_metadata(f.path)
    for key, value in md.items():
        # Record each key/value under both the per-key and per-file maps.
        self.key_index.setdefault(key, {})[f.path] = value
        self.file_index[f.path][key] = value
|
[
"Index metadata for the specified file.\n\n Args:\n f (BIDSFile, str): A BIDSFile or path to an indexed file.\n overwrite (bool): If True, forces reindexing of the file even if\n an entry already exists.\n "
] |
def search(self, files=None, defined_fields=None, **kwargs):
    """Search files in the layout by metadata fields.

    Args:
        files (list): Optional list of names of files to search. If None,
            all files in the layout are scanned.
        defined_fields (list): Optional list of names of fields that must
            be defined in the JSON sidecar in order to consider the file a
            match, but which don't need to match any particular value.
        kwargs: Optional keyword arguments defining search constraints;
            keys are names of metadata fields, and values are the values
            to match those fields against (e.g., SliceTiming=0.017 would
            return all files that have a SliceTiming value of 0.017 in
            metadata).

    Returns: A list of filenames that match all constraints.
    """
    if defined_fields is None:
        defined_fields = []

    all_keys = set(defined_fields) | set(kwargs.keys())
    if not all_keys:
        raise ValueError("At least one field to search on must be passed.")

    # If no list of files is passed, use all files in layout
    if files is None:
        files = set(self.layout.files.keys())

    # Index metadata for any previously unseen files
    for f in files:
        self.index_file(f)

    # Get file intersection of all kwargs keys--this is fast
    filesets = [set(self.key_index.get(k, [])) for k in all_keys]
    matches = reduce(lambda x, y: x & y, filesets)

    if files is not None:
        matches &= set(files)

    if not matches:
        return []

    def check_matches(f, key, val):
        if isinstance(val, six.string_types) and '*' in val:
            # Treat '*' as a wildcard: build an anchored regex from the
            # query value and test it against the file's metadata value.
            # (Pattern is the first argument to re.search; previously the
            # arguments were swapped, breaking wildcard matching.)
            val = ('^%s$' % val).replace('*', ".*")
            return re.search(val, str(self.file_index[f][key])) is not None
        else:
            return val == self.file_index[f][key]

    # Serially check matches against each pattern, with early termination
    for k, val in kwargs.items():
        matches = list(filter(lambda x: check_matches(x, k, val), matches))
        if not matches:
            return []

    return matches
|
[
"Search files in the layout by metadata fields.\n\n Args:\n files (list): Optional list of names of files to search. If None,\n all files in the layout are scanned.\n defined_fields (list): Optional list of names of fields that must\n be defined in the JSON sidecar in order to consider the file a\n match, but which don't need to match any particular value.\n kwargs: Optional keyword arguments defining search constraints;\n keys are names of metadata fields, and values are the values\n to match those fields against (e.g., SliceTiming=0.017) would\n return all files that have a SliceTiming value of 0.071 in\n metadata.\n\n Returns: A list of filenames that match all constraints.\n "
] |
def load_variables(layout, types=None, levels=None, skip_empty=True,
                   dataset=None, scope='all', **kwargs):
    ''' A convenience wrapper for one or more load_*_variables() calls.

    Args:
        layout (BIDSLayout): BIDSLayout containing variable files.
        types (str, list): Types of variables to retrieve. All valid values
            reflect the filename stipulated in the BIDS spec for each kind
            of variable. Valid values include: 'events', 'physio', 'stim',
            'scans', 'participants', 'sessions', and 'regressors'.
        levels (str, list): Optional level(s) of variables to load. Valid
            values are 'run', 'session', 'subject', or 'dataset'. This is
            simply a shorthand way to specify types--e.g., 'run' will be
            converted to types=['events', 'physio', 'stim', 'regressors'].
        skip_empty (bool): Whether or not to skip empty Variables (i.e.,
            where there are no rows/records in a file after applying any
            filtering operations like dropping NaNs).
        dataset (NodeIndex): An existing NodeIndex container to store the
            loaded data in. Can be used to iteratively construct a dataset
            that contains otherwise heterogeneous sets of variables. If
            None, a new NodeIndex is used.
        scope (str, list): The scope of the space to search for variables.
            See docstring for BIDSLayout for details and valid predefined
            values.
        kwargs: Optional keyword arguments to pass onto the individual
            load_*_variables() calls.

    Returns:
        A NodeIndex instance.

    Example:
        >>> load_variables(layout, ['events', 'physio'], subject='01')
        # returns all variables stored in _events.tsv and _physio.tsv.gz
        # files for runs that belong to subject with id '01'.
    '''
    TYPES = ['events', 'physio', 'stim', 'scans', 'participants', 'sessions',
             'regressors']

    types = listify(types)

    if types is None:
        if levels is not None:
            # Expand each requested level into its constituent variable
            # types (plain loop; a side-effect list comprehension was used
            # here before, which is an anti-pattern).
            types = []
            lev_map = {
                'run': ['events', 'physio', 'stim', 'regressors'],
                'session': ['scans'],
                'subject': ['sessions'],
                'dataset': ['participants']
            }
            for level in listify(levels):
                types.extend(lev_map[level.lower()])
        else:
            types = TYPES

    bad_types = set(types) - set(TYPES)
    if bad_types:
        raise ValueError("Invalid variable types: %s" % bad_types)

    dataset = dataset or NodeIndex()

    # Pass flags for the run-level types NOT requested, so the time-level
    # loader skips them; skip the call entirely if none are requested.
    run_types = list({'events', 'physio', 'stim', 'regressors'} - set(types))
    type_flags = {t: False for t in run_types}
    if len(type_flags) < 4:
        _kwargs = kwargs.copy()
        _kwargs.update(type_flags)
        dataset = _load_time_variables(layout, dataset, scope=scope, **_kwargs)

    for t in ({'scans', 'sessions', 'participants'} & set(types)):
        kwargs.pop('suffix', None)  # suffix is always one of the values above
        dataset = _load_tsv_variables(layout, t, dataset, scope=scope,
                                      **kwargs)

    return dataset
|
[] |
def _load_time_variables(layout, dataset=None, columns=None, scan_length=None,
                         drop_na=True, events=True, physio=True, stim=True,
                         regressors=True, skip_empty=True, scope='all',
                         **selectors):
    ''' Loads all variables found in *_events.tsv files and returns them as
    a BIDSVariableCollection.

    Args:
        layout (BIDSLayout): A BIDSLayout to scan.
        dataset (NodeIndex): A BIDS NodeIndex container. If None, a new one
            is initialized.
        columns (list): Optional list of names specifying which columns in
            the event files to read. By default, reads all columns found.
        scan_length (float): Optional duration of runs (in seconds). By
            default, this will be extracted from the BOLD image. However,
            in cases where the user doesn't have access to the images
            (e.g., because only file handles are locally available), a
            fixed duration can be manually specified as a fallback.
        drop_na (bool): If True, removes all events where amplitude is
            n/a. If False, leaves n/a values intact. Note that in the
            latter case, transformations that require numeric values may
            fail.
        events (bool): If True, extracts variables from events.tsv files.
        physio (bool): If True, extracts variables from _physio files.
        stim (bool): If True, extracts variables from _stim files.
        skip_empty (bool): Whether or not to skip empty Variables (i.e.,
            where there are no rows/records in a file, or all onsets,
            durations, and amplitudes are 0).
        scope (str, list): The scope of the space to search for variables.
            See docstring for BIDSLayout for details and valid predefined
            values.
        selectors (dict): Optional keyword arguments passed onto the
            BIDSLayout instance's get() method; can be used to constrain
            which data are loaded.

    Returns: A NodeIndex instance.
    '''
    if dataset is None:
        dataset = NodeIndex()

    selectors['datatype'] = 'func'
    selectors['suffix'] = 'bold'
    images = layout.get(return_type='object', extensions='.nii.gz',
                        scope=scope, **selectors)
    if not images:
        raise ValueError("No functional images that match criteria found.")

    # Main loop over images
    for img_obj in images:
        entities = img_obj.entities
        img_f = img_obj.path

        # Run is not mandatory, but we need a default for proper indexing
        if 'run' in entities:
            entities['run'] = int(entities['run'])

        tr = layout.get_metadata(img_f, suffix='bold', scope=scope,
                                 full_search=True)['RepetitionTime']

        # Get duration of run: first try to get it directly from the image
        # header; if that fails, look for a scan_length argument.
        try:
            import nibabel as nb
            img = nb.load(img_f)
            duration = img.shape[3] * tr
        except Exception:
            if scan_length is not None:
                duration = scan_length
            else:
                msg = ("Unable to extract scan duration from one or more "
                       "BOLD runs, and no scan_length argument was provided "
                       "as a fallback. Please check that the image files are "
                       "available, or manually specify the scan duration.")
                raise ValueError(msg)

        run = dataset.get_or_create_node('run', entities, image_file=img_f,
                                         duration=duration,
                                         repetition_time=tr)
        run_info = run.get_info()

        # Process event files
        if events:
            dfs = layout.get_nearest(img_f, extensions='.tsv',
                                     suffix='events', all_=True,
                                     full_search=True,
                                     ignore_strict_entities=['suffix'])
            if dfs is not None:
                for _data in dfs:
                    _data = pd.read_csv(_data, sep='\t')
                    if 'amplitude' in _data.columns:
                        if (_data['amplitude'].astype(int) == 1).all() and \
                                'trial_type' in _data.columns:
                            msg = ("Column 'amplitude' with constant value 1 "
                                   "is unnecessary in event files; ignoring "
                                   "it.")
                            _data = _data.drop('amplitude', axis=1)
                        else:
                            msg = ("Column name 'amplitude' is reserved; "
                                   "renaming it to 'amplitude_'.")
                            _data = _data.rename(
                                columns={'amplitude': 'amplitude_'})
                        warnings.warn(msg)

                    _data = _data.replace('n/a', np.nan)  # Replace BIDS' n/a
                    _data = _data.apply(pd.to_numeric, errors='ignore')

                    _cols = columns or list(set(_data.columns.tolist()) -
                                            {'onset', 'duration'})

                    # Construct a DataFrame for each extra column
                    for col in _cols:
                        df = _data[['onset', 'duration']].copy()
                        df['amplitude'] = _data[col].values

                        # Add all of the run's entities as new columns for
                        # indexing
                        for entity, value in entities.items():
                            if entity in ALL_ENTITIES:
                                df[entity] = value

                        if drop_na:
                            df = df.dropna(subset=['amplitude'])

                        if df.empty:
                            continue

                        var = SparseRunVariable(name=col, data=df,
                                                run_info=run_info,
                                                source='events')
                        run.add_variable(var)

        # Process confound files
        if regressors:
            sub_ents = {k: v for k, v in entities.items()
                        if k in BASE_ENTITIES}
            confound_files = layout.get(suffix='regressors', scope=scope,
                                        **sub_ents)
            for cf in confound_files:
                _data = pd.read_csv(cf.path, sep='\t', na_values='n/a')
                if columns is not None:
                    conf_cols = list(set(_data.columns) & set(columns))
                    _data = _data.loc[:, conf_cols]
                for col in _data.columns:
                    sr = 1. / run.repetition_time
                    var = DenseRunVariable(name=col, values=_data[[col]],
                                           run_info=run_info,
                                           source='regressors',
                                           sampling_rate=sr)
                    run.add_variable(var)

        # Process recording files
        rec_types = []
        if physio:
            rec_types.append('physio')
        if stim:
            rec_types.append('stim')

        if rec_types:
            rec_files = layout.get_nearest(img_f, extensions='.tsv.gz',
                                           all_=True, suffix=rec_types,
                                           ignore_strict_entities=['suffix'],
                                           full_search=True)
            for rf in rec_files:
                metadata = layout.get_metadata(rf)
                if not metadata:
                    raise ValueError("No .json sidecar found for '%s'." % rf)
                data = pd.read_csv(rf, sep='\t')
                freq = metadata['SamplingFrequency']
                st = metadata['StartTime']
                rf_cols = metadata['Columns']
                data.columns = rf_cols

                # Filter columns if user passed names
                if columns is not None:
                    rf_cols = list(set(rf_cols) & set(columns))
                    data = data.loc[:, rf_cols]

                n_cols = len(rf_cols)
                if not n_cols:
                    continue

                # Keep only in-scan samples. Cast to int: numpy rejects
                # float values used as slice indices...
                if st < 0:
                    start_ind = int(np.floor(-st * freq))
                    values = data.values[start_ind:, :]
                else:
                    values = data.values

                if st > 0:
                    # ...and as array shapes (np.zeros).
                    n_pad = int(np.round(freq * st))
                    pad = np.zeros((n_pad, n_cols))
                    values = np.r_[pad, values]

                n_rows = int(run.duration * freq)
                if len(values) > n_rows:
                    values = values[:n_rows, :]
                elif len(values) < n_rows:
                    pad = np.zeros((n_rows - len(values), n_cols))
                    values = np.r_[values, pad]

                df = pd.DataFrame(values, columns=rf_cols)
                source = 'physio' if '_physio.tsv' in rf else 'stim'
                for col in df.columns:
                    var = DenseRunVariable(name=col, values=df[[col]],
                                           run_info=run_info, source=source,
                                           sampling_rate=freq)
                    run.add_variable(var)
    return dataset
|
[] |
def _load_tsv_variables(layout, suffix, dataset=None, columns=None,
                        prepend_type=False, scope='all', **selectors):
    ''' Reads variables from scans.tsv, sessions.tsv, and participants.tsv.

    Args:
        layout (BIDSLayout): The BIDSLayout to use.
        suffix (str): The suffix of file to read from. Must be one of
            'scans', 'sessions', or 'participants'.
        dataset (NodeIndex): A BIDS NodeIndex container. If None, a new one
            is initialized.
        columns (list): Optional list of names specifying which columns in
            the files to return. If None, all columns are returned.
        prepend_type (bool): If True, variable names are prepended with the
            type name (e.g., 'age' becomes 'participants.age').
        scope (str, list): The scope of the space to search for variables.
            See docstring for BIDSLayout for details and valid predefined
            values.
        selectors (dict): Optional keyword arguments passed onto the
            BIDSLayout instance's get() method; can be used to constrain
            which data are loaded.

    Returns: A NodeIndex instance.
    '''
    # Sanitize the selectors: only keep entities at current level or above.
    # E.g., for scans.tsv (run level), only subject/session selectors apply
    # to the file query itself; other selectors filter rows further below.
    remap = {'scans': 'run', 'sessions': 'session', 'participants': 'subject'}
    level = remap[suffix]
    valid_entities = BASE_ENTITIES[:BASE_ENTITIES.index(level)]

    layout_kwargs = {k: v for k, v in selectors.items() if k in valid_entities}

    if dataset is None:
        dataset = NodeIndex()

    files = layout.get(extensions='.tsv', return_type='file', suffix=suffix,
                       scope=scope, **layout_kwargs)

    for f in files:
        # layout.get(return_type='file') yields paths; resolve to BIDSFile.
        f = layout.files[f]
        _data = pd.read_csv(f.path, sep='\t')

        # Entities can be defined either within the first column of the .tsv
        # file (for entities that vary by row), or from the full file path
        # (for entities constant over all rows in the file). We extract both
        # and store them in the main DataFrame alongside other variables (as
        # they'll be extracted when the BIDSVariable is initialized anyway).
        for ent_name, ent_val in f.entities.items():
            if ent_name in ALL_ENTITIES:
                _data[ent_name] = ent_val

        # Handling is a bit more convoluted for scans.tsv, because the first
        # column contains the run filename, which we also need to parse.
        if suffix == 'scans':
            # Suffix is guaranteed to be present in each filename, so drop
            # the constant column with value 'scans' to make way for it and
            # prevent two 'suffix' columns.
            _data.drop(columns='suffix', inplace=True)

            image = _data['filename']
            _data = _data.drop('filename', axis=1)
            dn = f.dirname
            paths = [join(dn, p) for p in image.values]
            # Parse entities out of each referenced image's own filename;
            # silently skip rows whose file isn't indexed by the layout.
            ent_recs = [layout.files[p].entities for p in paths
                        if p in layout.files]
            ent_cols = pd.DataFrame.from_records(ent_recs)
            _data = pd.concat([_data, ent_cols], axis=1, sort=True)
            # It's possible to end up with duplicate entity columns this way
            _data = _data.T.drop_duplicates().T

        # The BIDS spec requires ID columns to be named 'session_id',
        # 'run_id', etc., and IDs begin with entity prefixes (e.g.,
        # 'sub-01'). To ensure consistent internal handling, we strip these
        # suffixes and prefixes.
        elif suffix == 'sessions':
            _data = _data.rename(columns={'session_id': 'session'})
            _data['session'] = _data['session'].str.replace('ses-', '')

        elif suffix == 'participants':
            _data = _data.rename(columns={'participant_id': 'subject'})
            _data['subject'] = _data['subject'].str.replace('sub-', '')

        def make_patt(x, regex_search=False):
            # Build a regex for matching a single selector value against a
            # DataFrame column (anchored unless regex_search is enabled).
            patt = '%s' % x
            if isinstance(x, (int, float)):
                # allow for leading zeros if a number was specified
                # regardless of regex_search
                patt = '0*' + patt
            if not regex_search:
                patt = '^%s$' % patt
            return patt

        # Filter rows on all selectors
        comm_cols = list(set(_data.columns) & set(selectors.keys()))
        for col in comm_cols:
            ent_patts = [make_patt(x, regex_search=layout.regex_search)
                         for x in listify(selectors.get(col))]
            patt = '|'.join(ent_patts)
            _data = _data[_data[col].str.contains(patt)]

        # Variables from a file at level X attach to the node one level up
        # (e.g., scans.tsv variables belong to the session node).
        level = {'scans': 'session', 'sessions': 'subject',
                 'participants': 'dataset'}[suffix]
        node = dataset.get_or_create_node(level, f.entities)

        ent_cols = list(set(ALL_ENTITIES) & set(_data.columns))
        amp_cols = list(set(_data.columns) - set(ent_cols))

        if columns is not None:
            amp_cols = list(set(amp_cols) & set(columns))

        for col_name in amp_cols:
            # Rename columns: values must be in 'amplitude'
            df = _data.loc[:, [col_name] + ent_cols]
            df.columns = ['amplitude'] + ent_cols

            if prepend_type:
                col_name = '%s.%s' % (suffix, col_name)

            node.add_variable(SimpleVariable(name=col_name, data=df,
                                             source=suffix))

    return dataset
|
[] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.