Dataset columns (string length ranges as reported by the dataset viewer):

- repository_name: string (5–67 chars)
- func_path_in_repository: string (4–234 chars)
- func_name: string (0–314 chars)
- whole_func_string: string (52–3.87M chars)
- language: string (6 distinct values)
- func_code_string: string (52–3.87M chars)
- func_documentation_string: string (1–47.2k chars)
- func_code_url: string (85–339 chars)
ralphje/imagemounter
imagemounter/unmounter.py
Unmounter._index_mountpoints
python
def _index_mountpoints(self):
    """Finds all mountpoints and stores them in :attr:`mountpoints`"""
    # find all mountpoints
    self.mountpoints = {}
    # noinspection PyBroadException
    try:
        result = _util.check_output_(['mount'])
        for line in result.splitlines():
            m = re.match(r'(.+) on (.+) type (.+) \((.+)\)', line)
            if m:
                self.mountpoints[m.group(2)] = (m.group(1), m.group(3), m.group(4))
    except Exception:
        pass
Finds all mountpoints and stores them in :attr:`mountpoints`
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/unmounter.py#L88-L101
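For reference, the same `mount`-output parsing can be exercised outside the Unmounter class. The sketch below is a minimal standalone version, assuming Linux-style `mount` output of the form `<source> on <mountpoint> type <fstype> (<options>)`; it is an illustration, not part of imagemounter itself.

import re
import subprocess

def index_mountpoints():
    """Parse `mount` output into {mountpoint: (source, fstype, options)}."""
    mountpoints = {}
    output = subprocess.check_output(['mount'], text=True)
    for line in output.splitlines():
        # Linux mount prints lines like:
        # /dev/sda1 on /boot type ext4 (rw,relatime)
        m = re.match(r'(.+) on (.+) type (.+) \((.+)\)', line)
        if m:
            mountpoints[m.group(2)] = (m.group(1), m.group(3), m.group(4))
    return mountpoints

if __name__ == '__main__':
    for mountpoint, (source, fstype, options) in index_mountpoints().items():
        print(mountpoint, source, fstype, options)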
ralphje/imagemounter
imagemounter/unmounter.py
Unmounter._index_loopbacks
python
def _index_loopbacks(self):
    """Finds all loopbacks and stores them in :attr:`loopbacks`"""
    self.loopbacks = {}
    try:
        result = _util.check_output_(['losetup', '-a'])
        for line in result.splitlines():
            m = re.match(r'(.+): (.+) \((.+)\).*', line)
            if m:
                self.loopbacks[m.group(1)] = m.group(3)
    except Exception:
        pass
Finds all loopbacks and stores them in :attr:`loopbacks`
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/unmounter.py#L103-L114
ralphje/imagemounter
imagemounter/unmounter.py
Unmounter.find_bindmounts
python
def find_bindmounts(self):
    """Finds all bind mountpoints that are inside mounts that match the :attr:`re_pattern`"""
    for mountpoint, (orig, fs, opts) in self.mountpoints.items():
        if 'bind' in opts and re.match(self.re_pattern, mountpoint):
            yield mountpoint
Finds all bind mountpoints that are inside mounts that match the :attr:`re_pattern`
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/unmounter.py#L116-L121
ralphje/imagemounter
imagemounter/unmounter.py
Unmounter.find_mounts
python
def find_mounts(self):
    """Finds all mountpoints that are mounted to a directory matching :attr:`re_pattern` or
    originate from a directory matching :attr:`orig_re_pattern`.
    """
    for mountpoint, (orig, fs, opts) in self.mountpoints.items():
        if 'bind' not in opts and (re.match(self.orig_re_pattern, orig)
                                   or (self.be_greedy and re.match(self.re_pattern, mountpoint))):
            yield mountpoint
Finds all mountpoints that are mounted to a directory matching :attr:`re_pattern` or originate from a directory matching :attr:`orig_re_pattern`.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/unmounter.py#L123-L131
ralphje/imagemounter
imagemounter/unmounter.py
Unmounter.find_base_images
python
def find_base_images(self):
    """Finds all mountpoints that are mounted to a directory matching :attr:`orig_re_pattern`."""
    for mountpoint, _ in self.mountpoints.items():
        if re.match(self.orig_re_pattern, mountpoint):
            yield mountpoint
Finds all mountpoints that are mounted to a directory matching :attr:`orig_re_pattern`.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/unmounter.py#L133-L138
ralphje/imagemounter
imagemounter/unmounter.py
Unmounter.find_volume_groups
python
def find_volume_groups(self):
    """Finds all volume groups that are mounted through a loopback originating from
    :attr:`orig_re_pattern`.

    Generator yields tuples of vgname, pvname
    """
    os.environ['LVM_SUPPRESS_FD_WARNINGS'] = '1'

    # find volume groups
    try:
        result = _util.check_output_(['pvdisplay'])
        pvname = vgname = None
        for line in result.splitlines():
            if '--- Physical volume ---' in line:
                pvname = vgname = None
            elif "PV Name" in line:
                pvname = line.replace("PV Name", "").strip()
            elif "VG Name" in line:
                vgname = line.replace("VG Name", "").strip()

            if pvname and vgname:
                try:
                    # unmount volume groups with a physical volume originating from a disk image
                    if re.match(self.orig_re_pattern, self.loopbacks[pvname]):
                        yield vgname, pvname
                except Exception:
                    pass
                pvname = vgname = None
    except Exception:
        pass
Finds all volume groups that are mounted through a loopback originating from :attr:`orig_re_pattern`. Generator yields tuples of vgname, pvname
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/unmounter.py#L140-L170
ralphje/imagemounter
imagemounter/unmounter.py
Unmounter.find_loopbacks
python
def find_loopbacks(self):
    """Finds all loopbacks originating from :attr:`orig_re_pattern`.

    Generator yields device names
    """
    for dev, source in self.loopbacks.items():
        if re.match(self.orig_re_pattern, source):
            yield dev
Finds all loopbacks originating from :attr:`orig_re_pattern`. Generator yields device names
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/unmounter.py#L172-L180
ralphje/imagemounter
imagemounter/unmounter.py
Unmounter.unmount_bindmounts
python
def unmount_bindmounts(self):
    """Unmounts all bind mounts identified by :func:`find_bindmounts`"""
    for mountpoint in self.find_bindmounts():
        _util.clean_unmount(['umount'], mountpoint, rmdir=False)
Unmounts all bind mounts identified by :func:`find_bindmounts`
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/unmounter.py#L182-L186
ralphje/imagemounter
imagemounter/unmounter.py
Unmounter.unmount_volume_groups
python
def unmount_volume_groups(self):
    """Unmounts all volume groups and related loopback devices as identified by
    :func:`find_volume_groups`"""
    for vgname, pvname in self.find_volume_groups():
        _util.check_output_(['lvchange', '-a', 'n', vgname])
        _util.check_output_(['losetup', '-d', pvname])
Unmounts all volume groups and related loopback devices as identified by :func:`find_volume_groups`
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/unmounter.py#L200-L205
ralphje/imagemounter
imagemounter/unmounter.py
Unmounter.unmount_loopbacks
python
def unmount_loopbacks(self):
    """Unmounts all loopback devices as identified by :func:`find_loopbacks`"""
    # re-index loopback devices
    self._index_loopbacks()

    for dev in self.find_loopbacks():
        _util.check_output_(['losetup', '-d', dev])
Unmounts all loopback devices as identified by :func:`find_loopbacks`
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/unmounter.py#L207-L214
ralphje/imagemounter
imagemounter/unmounter.py
Unmounter.find_clean_dirs
python
def find_clean_dirs(self):
    """Finds all (temporary) directories according to the glob and re patterns that should be cleaned."""
    for folder in glob.glob(self.glob_pattern):
        if re.match(self.re_pattern, folder):
            yield folder
    for folder in glob.glob(self.orig_glob_pattern):
        if re.match(self.orig_re_pattern, folder):
            yield folder
Finds all (temporary) directories according to the glob and re patterns that should be cleaned.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/unmounter.py#L216-L224
ralphje/imagemounter
imagemounter/_util.py
expand_path
python
def expand_path(path):
    """
    Expand the given path to either an Encase image or a dd image
    i.e. if path is '/path/to/image.E01' then the result of this method will be
    '/path/to/image.E*'
    and if path is '/path/to/image.001' then the result of this method will be
    '/path/to/image.[0-9][0-9]?'
    """
    if is_encase(path):
        return glob.glob(path[:-2] + '??') or [path]
    ext_match = re.match(r'^.*\.(\d{2,})$', path)
    if ext_match is not None:
        ext_size = len(ext_match.groups()[-1])
        return glob.glob(path[:-ext_size] + '[0-9]' * ext_size) or [path]
    else:
        return [path]
Expand the given path to either an Encase image or a dd image i.e. if path is '/path/to/image.E01' then the result of this method will be '/path/to/image.E*' and if path is '/path/to/image.001' then the result of this method will be '/path/to/image.[0-9][0-9]?'
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/_util.py#L78-L93
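The numbered-extension branch of expand_path is easy to verify in isolation. The following sketch re-implements just that branch (the Encase/is_encase handling is omitted) and demonstrates it against three fake segment files; the file names here are invented for the demo.

import glob
import os
import re
import tempfile

def expand_numeric_path(path):
    """Expand '/path/to/image.001' to all matching numbered segments,
    as expand_path does for dd-style split images."""
    ext_match = re.match(r'^.*\.(\d{2,})$', path)
    if ext_match is None:
        return [path]
    ext_size = len(ext_match.group(1))
    return glob.glob(path[:-ext_size] + '[0-9]' * ext_size) or [path]

# quick demonstration with three fake segments
with tempfile.TemporaryDirectory() as d:
    for i in ('001', '002', '003'):
        open(os.path.join(d, 'image.' + i), 'w').close()
    print(sorted(expand_numeric_path(os.path.join(d, 'image.001'))))
    # -> ['.../image.001', '.../image.002', '.../image.003']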
ralphje/imagemounter
imagemounter/parser.py
ImageParser.add_disk
python
def add_disk(self, path, force_disk_indexes=True, **args):
    """Adds a disk specified by the path to the ImageParser.

    :param path: The path to the disk volume
    :param force_disk_indexes: If True, always uses disk indexes. If False, only uses disk indexes if this is
        the second volume you add. If you plan on using this method, always leave this True.
        If you add a second disk when the previous disk has no index, an error is raised.
    :param args: Arguments to pass to the constructor of the Disk.
    """
    if self.disks and self.disks[0].index is None:
        raise DiskIndexError("First disk has no index.")

    if force_disk_indexes or self.disks:
        index = len(self.disks) + 1
    else:
        index = None
    disk = Disk(self, path, index=str(index) if index else None, **args)
    self.disks.append(disk)
    return disk
Adds a disk specified by the path to the ImageParser. :param path: The path to the disk volume :param force_disk_indexes: If True, always uses disk indexes. If False, only uses disk indexes if this is the second volume you add. If you plan on using this method, always leave this True. If you add a second disk when the previous disk has no index, an error is raised. :param args: Arguments to pass to the constructor of the Disk.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/parser.py#L75-L93
ralphje/imagemounter
imagemounter/parser.py
ImageParser.init
python
def init(self, single=None, swallow_exceptions=True):
    """Handles all important disk-mounting tasks, i.e. calls the :func:`Disk.init` function on all underlying
    disks. It yields every volume that is encountered, including volumes that have not been mounted.

    :param single: indicates whether the :class:`Disk` should be mounted as a single volume (True), as a
        volume system (False), or whether both should be tried (defaults to :const:`None`)
    :type single: bool|None
    :param swallow_exceptions: specify whether you want the init calls to swallow exceptions
    :rtype: generator
    """
    for d in self.disks:
        for v in d.init(single, swallow_exceptions=swallow_exceptions):
            yield v
Handles all important disk-mounting tasks, i.e. calls the :func:`Disk.init` function on all underlying disks. It yields every volume that is encountered, including volumes that have not been mounted. :param single: indicates whether the :class:`Disk` should be mounted as a single volume (True), as a volume system (False), or whether both should be tried (defaults to :const:`None`) :type single: bool|None :param swallow_exceptions: specify whether you want the init calls to swallow exceptions :rtype: generator
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/parser.py#L95-L107
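A typical call sequence for add_disk and init might look like the sketch below. It assumes the package exports ImageParser at the top level and that a parser can be constructed empty and populated via add_disk; the evidence path is hypothetical.

from imagemounter import ImageParser

parser = ImageParser()
parser.add_disk('/evidence/image.E01')  # hypothetical path; split segments are expanded
try:
    for volume in parser.init():
        # unmounted volumes are yielded as well, so check the mountpoint first
        if volume.mountpoint:
            print(volume.index, volume.mountpoint)
finally:
    parser.clean(allow_lazy=True)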
ralphje/imagemounter
imagemounter/parser.py
ImageParser.mount_disks
python
def mount_disks(self):
    """Mounts all disks in the parser, i.e. calling :func:`Disk.mount` on all underlying disks. You probably
    want to use :func:`init` instead.

    :return: whether all mounts have succeeded
    :rtype: bool"""
    result = True
    for disk in self.disks:
        result = disk.mount() and result
    return result
Mounts all disks in the parser, i.e. calling :func:`Disk.mount` on all underlying disks. You probably want to use :func:`init` instead. :return: whether all mounts have succeeded :rtype: bool
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/parser.py#L109-L119
ralphje/imagemounter
imagemounter/parser.py
ImageParser.rw_active
python
def rw_active(self):
    """Indicates whether a read-write cache is active in any of the disks.

    :rtype: bool"""
    result = False
    for disk in self.disks:
        result = disk.rw_active() or result
    return result
Indicates whether a read-write cache is active in any of the disks. :rtype: bool
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/parser.py#L121-L128
ralphje/imagemounter
imagemounter/parser.py
ImageParser.init_volumes
python
def init_volumes(self, single=None, only_mount=None, skip_mount=None, swallow_exceptions=True):
    """Detects volumes (as volume system or as single volume) in all disks and yields the volumes. This calls
    :func:`Disk.init_volumes` on all disks and should be called after :func:`mount_disks`.

    :rtype: generator"""
    for disk in self.disks:
        logger.info("Mounting volumes in {0}".format(disk))
        for volume in disk.init_volumes(single, only_mount, skip_mount,
                                        swallow_exceptions=swallow_exceptions):
            yield volume
Detects volumes (as volume system or as single volume) in all disks and yields the volumes. This calls :func:`Disk.init_volumes` on all disks and should be called after :func:`mount_disks`. :rtype: generator
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/parser.py#L130-L139
ralphje/imagemounter
imagemounter/parser.py
ImageParser.get_by_index
python
def get_by_index(self, index):
    """Returns a Volume or Disk by its index."""
    try:
        return self[index]
    except KeyError:
        for v in self.get_volumes():
            if v.index == str(index):
                return v
        raise KeyError(index)
Returns a Volume or Disk by its index.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/parser.py#L141-L150
ralphje/imagemounter
imagemounter/parser.py
ImageParser.get_volumes
python
def get_volumes(self):
    """Gets a list of all volumes of all disks, concatenating :func:`Disk.get_volumes` of all disks.

    :rtype: list"""
    volumes = []
    for disk in self.disks:
        volumes.extend(disk.get_volumes())
    return volumes
Gets a list of all volumes of all disks, concatenating :func:`Disk.get_volumes` of all disks. :rtype: list
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/parser.py#L152-L160
ralphje/imagemounter
imagemounter/parser.py
ImageParser.clean
python
def clean(self, remove_rw=False, allow_lazy=False):
    """Cleans all volumes of all disks (:func:`Volume.unmount`) and all disks (:func:`Disk.unmount`). Volume
    errors are ignored, but returns immediately on disk unmount error.

    :param bool remove_rw: indicates whether a read-write cache should be removed
    :param bool allow_lazy: indicates whether lazy unmounting is allowed
    :raises SubsystemError: when one of the underlying commands fails. Some are swallowed.
    :raises CleanupError: when actual cleanup fails. Some are swallowed.
    """
    # To ensure clean unmount after reconstruct, we sort across all volumes in all our disks to provide
    # a proper order
    volumes = list(sorted(self.get_volumes(), key=lambda v: v.mountpoint or "", reverse=True))
    for v in volumes:
        try:
            v.unmount(allow_lazy=allow_lazy)
        except ImageMounterError:
            logger.error("Error unmounting volume {0}".format(v.mountpoint))

    # Now just clean the rest.
    for disk in self.disks:
        disk.unmount(remove_rw, allow_lazy=allow_lazy)
Cleans all volumes of all disks (:func:`Volume.unmount`) and all disks (:func:`Disk.unmount`). Volume errors are ignored, but returns immediately on disk unmount error. :param bool remove_rw: indicates whether a read-write cache should be removed :param bool allow_lazy: indicates whether lazy unmounting is allowed :raises SubsystemError: when one of the underlying commands fails. Some are swallowed. :raises CleanupError: when actual cleanup fails. Some are swallowed.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/parser.py#L162-L183
ralphje/imagemounter
imagemounter/parser.py
ImageParser.force_clean
python
def force_clean(self, remove_rw=False, allow_lazy=False, retries=5, sleep_interval=0.5):
    """Attempts to call the clean method, but will retry automatically if an error is raised. When the
    attempts run out, it will raise the last error.

    Note that the method will only catch :class:`ImageMounterError` exceptions.

    :param bool remove_rw: indicates whether a read-write cache should be removed
    :param bool allow_lazy: indicates whether lazy unmounting is allowed
    :param retries: Maximum amount of retries while unmounting
    :param sleep_interval: The sleep interval between attempts.
    :raises SubsystemError: when one of the underlying commands fails. Some are swallowed.
    :raises CleanupError: when actual cleanup fails. Some are swallowed.
    """
    while True:
        try:
            self.clean(remove_rw=remove_rw, allow_lazy=allow_lazy)
        except ImageMounterError:
            if retries == 0:
                raise
            retries -= 1
            time.sleep(sleep_interval)
        else:
            return
Attempts to call the clean method, but will retry automatically if an error is raised. When the attempts run out, it will raise the last error. Note that the method will only catch :class:`ImageMounterError` exceptions. :param bool remove_rw: indicates whether a read-write cache should be removed :param bool allow_lazy: indicates whether lazy unmounting is allowed :param retries: Maximum amount of retries while unmounting :param sleep_interval: The sleep interval between attempts. :raises SubsystemError: when one of the underlying commands fails. Some are swallowed. :raises CleanupError: when actual cleanup fails. Some are swallowed.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/parser.py#L185-L208
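The retry loop in force_clean is a generic pattern: keep calling, decrement a budget on failure, and re-raise the last error when the budget runs out. A minimal standalone sketch of the same pattern (not imagemounter API):

import time

def retry(func, retries=5, sleep_interval=0.5, exceptions=(Exception,)):
    """Call func until it succeeds; re-raise the last error when retries run out."""
    while True:
        try:
            return func()
        except exceptions:
            if retries == 0:
                raise
            retries -= 1
            time.sleep(sleep_interval)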
ralphje/imagemounter
imagemounter/parser.py
ImageParser.reconstruct
python
def reconstruct(self):
    """Reconstructs the filesystem of all volumes mounted by the parser by inspecting the last mount point and
    bind mounting everything.

    :raises: NoRootFoundError if no root could be found
    :return: the root :class:`Volume`
    """
    volumes = list(sorted((v for v in self.get_volumes()
                           if v.mountpoint and v.info.get('lastmountpoint')),
                          key=lambda v: v.numeric_index))

    try:
        root = list(filter(lambda x: x.info.get('lastmountpoint') == '/', volumes))[0]
    except IndexError:
        logger.error("Could not find / while reconstructing, aborting!")
        raise NoRootFoundError()

    volumes.remove(root)

    for v in volumes:
        if v.info.get('lastmountpoint') == root.info.get('lastmountpoint'):
            logger.debug("Skipping volume %s as it has the same root as %s", v, root)
            continue
        v.bindmount(os.path.join(root.mountpoint, v.info.get('lastmountpoint')[1:]))
    return root
Reconstructs the filesystem of all volumes mounted by the parser by inspecting the last mount point and bind mounting everything. :raises: NoRootFoundError if no root could be found :return: the root :class:`Volume`
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/parser.py#L210-L233
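Continuing the earlier usage sketch, reconstruction is then a single call once the volumes are mounted; the returned root is the volume whose last mount point was '/':

# after parser.init() has mounted the volumes:
root = parser.reconstruct()  # raises NoRootFoundError when no '/' volume exists
print("reconstructed tree at", root.mountpoint)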
ralphje/imagemounter
imagemounter/volume_system.py
VolumeSystem._make_subvolume
python
def _make_subvolume(self, **args):
    """Creates a subvolume, adds it to this class and returns it."""
    from imagemounter.volume import Volume
    v = Volume(disk=self.disk, parent=self.parent,
               volume_detector=self.volume_detector, **args)
    # vstype is not passed down, let it decide for itself.
    self.volumes.append(v)
    return v
Creates a subvolume, adds it to this class and returns it.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume_system.py#L68-L76
ralphje/imagemounter
imagemounter/volume_system.py
VolumeSystem._make_single_subvolume
python
def _make_single_subvolume(self, only_one=True, **args):
    """Creates a subvolume, adds it to this class, sets the volume index to 0 and returns it.

    :param bool only_one: if this volume system already has at least one volume, it is returned instead.
    """
    if only_one and self.volumes:
        return self.volumes[0]

    if self.parent.index is None:
        index = '0'
    else:
        index = '{0}.0'.format(self.parent.index)
    volume = self._make_subvolume(index=index, **args)

    return volume
Creates a subvolume, adds it to this class, sets the volume index to 0 and returns it. :param bool only_one: if this volume system already has at least one volume, it is returned instead.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume_system.py#L78-L92
ralphje/imagemounter
imagemounter/volume_system.py
VolumeSystem.detect_volumes
python
def detect_volumes(self, vstype=None, method=None, force=False):
    """Iterator for detecting volumes within this volume system.

    :param str vstype: The volume system type to use. If None, uses :attr:`vstype`
    :param str method: The detection method to use. If None, uses :attr:`detection`
    :param bool force: Specify if you want to force running the detection if :attr:`has_detected` is True.
    """
    if self.has_detected and not force:
        logger.warning("Detection already ran.")
        return

    if vstype is None:
        vstype = self.vstype
    if method is None:
        method = self.volume_detector

    if method == 'auto':
        method = VolumeSystem._determine_auto_detection_method()

    if method in ALL_VOLUME_SYSTEM_DETECTORS:
        for v in ALL_VOLUME_SYSTEM_DETECTORS[method].detect(self, vstype):
            yield v
    else:
        logger.error("No viable detection method found")
        raise ArgumentError("No viable detection method found")

    self.has_detected = True
Iterator for detecting volumes within this volume system. :param str vstype: The volume system type to use. If None, uses :attr:`vstype` :param str method: The detection method to use. If None, uses :attr:`detection` :param bool force: Specify if you want to force running the detection if :attr:`has_detected` is True.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume_system.py#L94-L120
ralphje/imagemounter
imagemounter/volume_system.py
VolumeSystem._determine_auto_detection_method
python
def _determine_auto_detection_method():
    """Return the detection method to use when the detection method is 'auto'"""
    if dependencies.pytsk3.is_available:
        return 'pytsk3'
    elif dependencies.mmls.is_available:
        return 'mmls'
    elif dependencies.parted.is_available:
        return 'parted'
    else:
        raise PrerequisiteFailedError("No valid detection method is installed.")
Return the detection method to use when the detection method is 'auto'
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume_system.py#L123-L133
ralphje/imagemounter
imagemounter/volume_system.py
VolumeSystem._load_disktype_data
python
def _load_disktype_data(self):
    """Calls the :command:`disktype` command and obtains the disk GUID from GPT volume systems. As we
    are running the tool anyway, the label is also extracted from the tool if it is not yet set.

    The disktype data is only loaded and not assigned to volumes yet.
    """
    if not _util.command_exists('disktype'):
        logger.warning("disktype not installed, could not detect volume type")
        return None

    disktype = _util.check_output_(['disktype', self.parent.get_raw_path()]).strip()

    current_partition = None
    for line in disktype.splitlines():
        if not line:
            continue
        # noinspection PyBroadException
        try:
            line = line.strip()

            find_partition_nr = re.match(r"^Partition (\d+):", line)
            if find_partition_nr:
                current_partition = int(find_partition_nr.group(1))
            elif current_partition is not None:
                if line.startswith("Type ") and "GUID" in line:
                    self._disktype[current_partition]['guid'] = \
                        line[line.index('GUID') + 5:-1].strip()  # output is between ()
                elif line.startswith("Partition Name "):
                    self._disktype[current_partition]['label'] = \
                        line[line.index('Name ') + 6:-1].strip()  # output is between ""
        except Exception:
            logger.exception("Error while parsing disktype output")
            return
Calls the :command:`disktype` command and obtains the disk GUID from GPT volume systems. As we are running the tool anyway, the label is also extracted from the tool if it is not yet set. The disktype data is only loaded and not assigned to volumes yet.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume_system.py#L140-L173
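The disktype parsing above is easy to check against a canned sample. The sketch below extracts the same GUID and label fields into a plain dict; the partition data in SAMPLE is invented for illustration.

import re
from collections import defaultdict

# invented disktype-style output for demonstration
SAMPLE = """\
Partition 1: 512 MiB (1048576 sectors starting at 2048)
  Type EFI System (GUID 28732AC1-1FF8-D211-BA4B-00A0C93EC93B)
  Partition Name "EFI system partition"
"""

def parse_disktype(output):
    disktype = defaultdict(dict)
    current = None
    for line in output.splitlines():
        line = line.strip()
        if not line:
            continue
        m = re.match(r"^Partition (\d+):", line)
        if m:
            current = int(m.group(1))
        elif current is not None:
            if line.startswith("Type ") and "GUID" in line:
                disktype[current]['guid'] = line[line.index('GUID') + 5:-1].strip()   # text between ()
            elif line.startswith("Partition Name "):
                disktype[current]['label'] = line[line.index('Name ') + 6:-1].strip()  # text between ""
    return dict(disktype)

print(parse_disktype(SAMPLE))
# {1: {'guid': '28732AC1-1FF8-D211-BA4B-00A0C93EC93B', 'label': 'EFI system partition'}}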
ralphje/imagemounter
imagemounter/volume_system.py
VolumeSystem._assign_disktype_data
python
def _assign_disktype_data(self, volume, slot=None):
    """Assigns cached disktype data to a volume."""
    if slot is None:
        slot = volume.slot
    if slot in self._disktype:
        data = self._disktype[slot]
        if not volume.info.get('guid') and 'guid' in data:
            volume.info['guid'] = data['guid']
        if not volume.info.get('label') and 'label' in data:
            volume.info['label'] = data['label']
Assigns cached disktype data to a volume.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume_system.py#L175-L185
ralphje/imagemounter
imagemounter/volume_system.py
VolumeDetector._format_index
python
def _format_index(self, volume_system, idx):
    """Returns a formatted index given the disk index idx."""
    if volume_system.parent.index is not None:
        return '{0}.{1}'.format(volume_system.parent.index, idx)
    else:
        return str(idx)
Returns a formatted index given the disk index idx.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume_system.py#L199-L205
ralphje/imagemounter
imagemounter/volume_system.py
SingleVolumeDetector.detect
python
def detect(self, volume_system, vstype='detect'):
    """'Detects' a single volume. It should not be called other than from a :class:`Disk`."""
    volume = volume_system._make_single_subvolume(offset=0)

    is_directory = os.path.isdir(volume_system.parent.get_raw_path())
    if is_directory:
        filesize = _util.check_output_(['du', '-scDb', volume_system.parent.get_raw_path()]).strip()
        if filesize:
            volume.size = int(filesize.splitlines()[-1].split()[0])
    else:
        description = _util.check_output_(['file', '-sL', volume_system.parent.get_raw_path()]).strip()
        if description:
            # description is the part after the :, until the first comma
            volume.info['fsdescription'] = description.split(': ', 1)[1].split(',', 1)[0].strip()
            if 'size' in description:
                volume.size = int(re.findall(r'size:? (\d+)', description)[0])
            else:
                volume.size = os.path.getsize(volume_system.parent.get_raw_path())

    volume.flag = 'alloc'
    volume_system.volume_source = 'single'
    volume_system._assign_disktype_data(volume)
    yield volume
'Detects' a single volume. It should not be called other than from a :class:`Disk`.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume_system.py#L212-L235
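The "part after the colon, until the first comma" split used above is worth seeing on a concrete line. The sample `file -sL` output below is invented for illustration:

description = '/evidence/image.dd: Linux rev 1.0 ext4 filesystem data, UUID=1234-abcd (extents)'
fsdescription = description.split(': ', 1)[1].split(',', 1)[0].strip()
print(fsdescription)  # -> 'Linux rev 1.0 ext4 filesystem data'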
ralphje/imagemounter
imagemounter/volume_system.py
Pytsk3VolumeDetector._find_volumes
python
def _find_volumes(self, volume_system, vstype='detect'):
    """Finds all volumes based on the pytsk3 library."""
    try:
        # noinspection PyUnresolvedReferences
        import pytsk3
    except ImportError:
        logger.error("pytsk3 not installed, could not detect volumes")
        raise ModuleNotFoundError("pytsk3")

    baseimage = None
    try:
        # ewf raw image is now available on base mountpoint
        # either as ewf1 file or as .dd file
        raw_path = volume_system.parent.get_raw_path()
        # noinspection PyBroadException
        try:
            baseimage = pytsk3.Img_Info(raw_path)
        except Exception:
            logger.error("Failed retrieving image info (possible empty image).", exc_info=True)
            return []

        try:
            volumes = pytsk3.Volume_Info(baseimage, getattr(pytsk3, 'TSK_VS_TYPE_' + vstype.upper()),
                                         volume_system.parent.offset // volume_system.disk.block_size)
            volume_system.volume_source = 'multi'
            return volumes
        except Exception as e:
            # some bug in sleuthkit makes detection sometimes difficult, so we hack around it:
            if "(GPT or DOS at 0)" in str(e) and vstype != 'gpt':
                volume_system.vstype = 'gpt'
                # noinspection PyBroadException
                try:
                    logger.warning("Error in retrieving volume info: TSK couldn't decide between GPT and DOS, "
                                   "choosing GPT for you. Use --vstype=dos to force DOS.", exc_info=True)
                    volumes = pytsk3.Volume_Info(baseimage, getattr(pytsk3, 'TSK_VS_TYPE_GPT'))
                    volume_system.volume_source = 'multi'
                    return volumes
                except Exception as e:
                    logger.exception("Failed retrieving image info (possible empty image).")
                    raise SubsystemError(e)
            else:
                logger.exception("Failed retrieving image info (possible empty image).")
                raise SubsystemError(e)
    finally:
        if baseimage:
            baseimage.close()
            del baseimage
Finds all volumes based on the pytsk3 library.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume_system.py#L241-L288
ralphje/imagemounter
imagemounter/volume_system.py
Pytsk3VolumeDetector.detect
python
def detect(self, volume_system, vstype='detect'):
    """Generator that mounts every partition of this image and yields the mountpoint."""
    # Loop over all volumes in image.
    for p in self._find_volumes(volume_system, vstype):
        import pytsk3

        volume = volume_system._make_subvolume(
            index=self._format_index(volume_system, p.addr),
            offset=p.start * volume_system.disk.block_size,
            size=p.len * volume_system.disk.block_size
        )
        # Fill volume with more information
        volume.info['fsdescription'] = p.desc.strip().decode('utf-8')

        if p.flags == pytsk3.TSK_VS_PART_FLAG_ALLOC:
            volume.flag = 'alloc'
            volume.slot = _util.determine_slot(p.table_num, p.slot_num)
            volume_system._assign_disktype_data(volume)
            logger.info("Found allocated {2}: block offset: {0}, length: {1} "
                        .format(p.start, p.len, volume.info['fsdescription']))
        elif p.flags == pytsk3.TSK_VS_PART_FLAG_UNALLOC:
            volume.flag = 'unalloc'
            logger.info("Found unallocated space: block offset: {0}, length: {1} ".format(p.start, p.len))
        elif p.flags == pytsk3.TSK_VS_PART_FLAG_META:
            volume.flag = 'meta'
            logger.info("Found meta volume: block offset: {0}, length: {1} ".format(p.start, p.len))

        yield volume
Generator that mounts every partition of this image and yields the mountpoint.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume_system.py#L291-L319
ralphje/imagemounter
imagemounter/volume_system.py
PartedVolumeDetector.detect
python
def detect(self, volume_system, vstype='detect'):
    """Finds and mounts all volumes based on parted.

    :param VolumeSystem volume_system: The volume system.
    """
    # for some reason, parted does not properly return extended volume types in its machine
    # output, so we need to execute it twice.
    meta_volumes = []
    # noinspection PyBroadException
    try:
        output = _util.check_output_(['parted', volume_system.parent.get_raw_path(), 'print'],
                                     stdin=subprocess.PIPE)
        for line in output.splitlines():
            if 'extended' in line:
                meta_volumes.append(int(line.split()[0]))
    except Exception:
        logger.exception("Failed executing parted command.")
        # skip detection of meta volumes

    # noinspection PyBroadException
    try:
        # parted does not support passing in the vstype. It either works, or it doesn't.
        cmd = ['parted', volume_system.parent.get_raw_path(), '-sm', 'unit s', 'print free']
        output = _util.check_output_(cmd, stdin=subprocess.PIPE)
        volume_system.volume_source = 'multi'
    except Exception as e:
        logger.exception("Failed executing parted command")
        raise SubsystemError(e)

    num = 0
    for line in output.splitlines():
        if line.startswith("Warning") or not line or ':' not in line \
                or line.startswith(volume_system.parent.get_raw_path()):
            continue
        line = line[:-1]  # remove last ;
        try:
            slot, start, end, length, description = line.split(':', 4)
            if ':' in description:
                description, label, flags = description.split(':', 2)
            else:
                description, label, flags = description, '', ''

            try:
                slot = int(slot)
            except ValueError:
                continue

            volume = volume_system._make_subvolume(
                index=self._format_index(volume_system, num),
                offset=int(start[:-1]) * volume_system.disk.block_size,  # remove last s
                size=int(length[:-1]) * volume_system.disk.block_size)
            volume.info['fsdescription'] = description
            if label:
                volume.info['label'] = label
            if flags:
                volume.info['parted_flags'] = flags

            # TODO: detection of meta volumes
            if description == 'free':
                volume.flag = 'unalloc'
                logger.info("Found unallocated space: block offset: {0}, length: {1}"
                            .format(start[:-1], length[:-1]))
            elif slot in meta_volumes:
                volume.flag = 'meta'
                volume.slot = slot
                logger.info("Found meta volume: block offset: {0}, length: {1}"
                            .format(start[:-1], length[:-1]))
            else:
                volume.flag = 'alloc'
                volume.slot = slot
                volume_system._assign_disktype_data(volume)
                logger.info("Found allocated {2}: block offset: {0}, length: {1} "
                            .format(start[:-1], length[:-1], volume.info['fsdescription']))
        except AttributeError:
            logger.exception("Error while parsing parted output")
            continue

        num += 1
        yield volume
Finds and mounts all volumes based on parted. :param VolumeSystem volume_system: The volume system.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume_system.py#L326-L404
ralphje/imagemounter
imagemounter/volume_system.py
MmlsVolumeDetector.detect
python
def detect(self, volume_system, vstype='detect'):
    """Finds and mounts all volumes based on mmls."""
    try:
        cmd = ['mmls']
        if volume_system.parent.offset:
            cmd.extend(['-o', str(volume_system.parent.offset // volume_system.disk.block_size)])
        if vstype in ('dos', 'mac', 'bsd', 'sun', 'gpt'):
            cmd.extend(['-t', vstype])
        cmd.append(volume_system.parent.get_raw_path())
        output = _util.check_output_(cmd, stderr=subprocess.STDOUT)
        volume_system.volume_source = 'multi'
    except Exception as e:
        # some bug in sleuthkit makes detection sometimes difficult, so we hack around it:
        if hasattr(e, 'output') and "(GPT or DOS at 0)" in e.output.decode() and vstype != 'gpt':
            volume_system.vstype = 'gpt'
            # noinspection PyBroadException
            try:
                logger.warning("Error in retrieving volume info: mmls couldn't decide between GPT and DOS, "
                               "choosing GPT for you. Use --vstype=dos to force DOS.", exc_info=True)
                cmd = ['mmls', '-t', 'gpt', volume_system.parent.get_raw_path()]
                output = _util.check_output_(cmd, stderr=subprocess.STDOUT)
                volume_system.volume_source = 'multi'
            except Exception as e:
                logger.exception("Failed executing mmls command")
                raise SubsystemError(e)
        else:
            logger.exception("Failed executing mmls command")
            raise SubsystemError(e)

    output = output.split("Description", 1)[-1]
    for line in output.splitlines():
        if not line:
            continue
        # noinspection PyBroadException
        try:
            values = line.split(None, 5)

            # sometimes there are only 5 elements available
            description = ''
            index, slot, start, end, length = values[0:5]
            if len(values) > 5:
                description = values[5]

            volume = volume_system._make_subvolume(
                index=self._format_index(volume_system, int(index[:-1])),
                offset=int(start) * volume_system.disk.block_size,
                size=int(length) * volume_system.disk.block_size
            )
            volume.info['fsdescription'] = description
        except Exception:
            logger.exception("Error while parsing mmls output")
            continue

        if slot.lower() == 'meta':
            volume.flag = 'meta'
            logger.info("Found meta volume: block offset: {0}, length: {1}".format(start, length))
        elif slot.lower().startswith('-----'):
            volume.flag = 'unalloc'
            logger.info("Found unallocated space: block offset: {0}, length: {1}".format(start, length))
        else:
            volume.flag = 'alloc'
            if ":" in slot:
                volume.slot = _util.determine_slot(*slot.split(':'))
            else:
                volume.slot = _util.determine_slot(-1, slot)
            volume_system._assign_disktype_data(volume)
            logger.info("Found allocated {2}: block offset: {0}, length: {1} "
                        .format(start, length, volume.info['fsdescription']))

        yield volume
Finds and mounts all volumes based on mmls.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume_system.py#L411-L482
ralphje/imagemounter
imagemounter/volume_system.py
VssVolumeDetector.detect
def detect(self, volume_system, vstype='detect'):
        """Detect volume shadow copy volumes in the specified path."""
        path = volume_system.parent._paths['vss']

        try:
            volume_info = _util.check_output_(["vshadowinfo", "-o", str(volume_system.parent.offset),
                                               volume_system.parent.get_raw_path()])
        except Exception as e:
            logger.exception("Failed obtaining info from the volume shadow copies.")
            raise SubsystemError(e)

        current_store = None
        for line in volume_info.splitlines():
            line = line.strip()
            if line.startswith("Store:"):
                idx = line.split(":")[-1].strip()
                current_store = volume_system._make_subvolume(
                    index=self._format_index(volume_system, idx), flag='alloc', offset=0
                )
                current_store._paths['vss_store'] = os.path.join(path, 'vss' + idx)
                current_store.info['fsdescription'] = 'VSS Store'
            elif line.startswith("Volume size"):
                current_store.size = int(line.split(":")[-1].strip().split()[0])
            elif line.startswith("Creation time"):
                current_store.info['creation_time'] = line.split(":")[-1].strip()

        return volume_system.volumes
python
Detect volume shadow copy volumes in the specified path.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume_system.py#L490-L517
ralphje/imagemounter
imagemounter/volume_system.py
LvmVolumeDetector.detect
def detect(self, volume_system, vstype='detect'):
        """Gathers information about logical volumes: their label, size and raw path"""
        volume_group = volume_system.parent.info.get('volume_group')

        result = _util.check_output_(["lvm", "lvdisplay", volume_group])
        cur_v = None
        for l in result.splitlines():
            if "--- Logical volume ---" in l:
                cur_v = volume_system._make_subvolume(
                    index=self._format_index(volume_system, len(volume_system)),
                    flag='alloc'
                )
                cur_v.info['fsdescription'] = 'Logical Volume'
            if "LV Name" in l:
                cur_v.info['label'] = l.replace("LV Name", "").strip()
            if "LV Size" in l:
                size, unit = l.replace("LV Size", "").strip().split(" ", 1)
                cur_v.size = int(float(size.replace(',', '.')) * {'KiB': 1024, 'MiB': 1024 ** 2,
                                                                  'GiB': 1024 ** 3, 'TiB': 1024 ** 4}.get(unit, 1))
            if "LV Path" in l:
                cur_v._paths['lv'] = l.replace("LV Path", "").strip()
                cur_v.offset = 0

        logger.info("{0} volumes found".format(len(volume_system)))

        volume_system.volume_source = 'multi'
        return volume_system.volumes
python
Gathers information about logical volumes: their label, size and raw path
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume_system.py#L525-L551
ralphje/imagemounter
imagemounter/volume.py
Volume._get_fstype_from_parser
def _get_fstype_from_parser(self, fstype=None):
        """Load fstype information from the parser instance."""
        if fstype:
            self.fstype = fstype
        elif self.index in self.disk.parser.fstypes:
            self.fstype = self.disk.parser.fstypes[self.index]
        elif '*' in self.disk.parser.fstypes:
            self.fstype = self.disk.parser.fstypes['*']
        elif '?' in self.disk.parser.fstypes and self.disk.parser.fstypes['?'] is not None:
            self.fstype = "?" + self.disk.parser.fstypes['?']
        else:
            self.fstype = ""

        if self.fstype in VOLUME_SYSTEM_TYPES:
            self.volumes.vstype = self.fstype
            self.fstype = 'volumesystem'

        # convert fstype from string to a FileSystemType object
        if not isinstance(self.fstype, filesystems.FileSystemType):
            if self.fstype.startswith("?"):
                fallback = FILE_SYSTEM_TYPES[self.fstype[1:]]
                self.fstype = filesystems.FallbackFileSystemType(fallback)
            else:
                self.fstype = FILE_SYSTEM_TYPES[self.fstype]
python
Load fstype information from the parser instance.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume.py#L99-L122
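For context, a minimal sketch of how a caller could steer this lookup through the parser's fstypes mapping; the image path and index values are illustrative assumptions, not part of the record above:

from imagemounter import ImageParser

# '2' pins volume 2 to NTFS; '?' registers ext as a fallback hint, which
# _get_fstype_from_parser wraps in a FallbackFileSystemType ("?ext");
# '*' would instead force a type for every volume.
parser = ImageParser(['/tmp/image.dd'], fstypes={'2': 'ntfs', '?': 'ext'})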
ralphje/imagemounter
imagemounter/volume.py
Volume.get_description
def get_description(self, with_size=True, with_index=True):
        """Obtains a generic description of the volume, containing the file system type, index, label and NTFS
        version. If *with_size* is provided, the volume size is also included.
        """
        desc = ''

        if with_size and self.size:
            desc += '{0} '.format(self.get_formatted_size())

        s = self.info.get('statfstype') or self.info.get('fsdescription') or '-'
        if with_index:
            desc += '{1}:{0}'.format(s, self.index)
        else:
            desc += s

        if self.info.get('label'):
            desc += ' {0}'.format(self.info.get('label'))

        if self.info.get('version'):  # NTFS
            desc += ' [{0}]'.format(self.info.get('version'))

        return desc
python
Obtains a generic description of the volume, containing the file system type, index, label and NTFS version. If *with_size* is provided, the volume size is also included.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume.py#L124-L146
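A short usage sketch tying the description helpers together; the image path is an assumption, and the printed format follows the composition logic above:

from imagemounter import ImageParser

parser = ImageParser(['/tmp/image.dd'])
for volume in parser.init():
    # e.g. "2.0 GiB 4:NTFS System [3.1]" -- size, index:fstype, label, NTFS version
    print(volume.get_description())
parser.clean()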
ralphje/imagemounter
imagemounter/volume.py
Volume.get_formatted_size
def get_formatted_size(self):
        """Obtains the size of the volume in a human-readable format (i.e. in TiBs, GiBs or MiBs)."""
        if self.size is not None:
            if self.size < 1024:
                return "{0} B".format(self.size)
            elif self.size < 1024 ** 2:
                return "{0} KiB".format(round(self.size / 1024, 2))
            elif self.size < 1024 ** 3:
                return "{0} MiB".format(round(self.size / 1024 ** 2, 2))
            elif self.size < 1024 ** 4:
                return "{0} GiB".format(round(self.size / 1024 ** 3, 2))
            else:
                return "{0} TiB".format(round(self.size / 1024 ** 4, 2))
        else:
            return self.size
python
Obtains the size of the volume in a human-readable format (i.e. in TiBs, GiBs or MiBs).
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume.py#L148-L163
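A worked example of the thresholds above (plain arithmetic, no imagemounter API involved):

size = 5 * 1024 ** 3 + 256 * 1024 ** 2       # 5632 MiB in bytes
# size < 1024 ** 4 but >= 1024 ** 3, so the GiB branch applies:
assert round(size / 1024 ** 3, 2) == 5.25    # rendered as "5.25 GiB"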
ralphje/imagemounter
imagemounter/volume.py
Volume._get_blkid_type
def _get_blkid_type(self):
        """Retrieves the FS type from the blkid command."""
        try:
            result = _util.check_output_(['blkid', '-p', '-O', str(self.offset), self.get_raw_path()])
            if not result:
                return None

            # noinspection PyTypeChecker
            blkid_result = dict(re.findall(r'([A-Z]+)="(.+?)"', result))

            self.info['blkid_data'] = blkid_result

            if 'PTTYPE' in blkid_result and 'TYPE' not in blkid_result:
                return blkid_result.get('PTTYPE')
            else:
                return blkid_result.get('TYPE')
        except Exception:
            return None
python
Retrieves the FS type from the blkid command.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume.py#L166-L184
ralphje/imagemounter
imagemounter/volume.py
Volume._get_magic_type
def _get_magic_type(self):
        """Checks the volume for its magic bytes and returns the magic."""
        try:
            with io.open(self.disk.get_fs_path(), "rb") as file:
                file.seek(self.offset)
                fheader = file.read(min(self.size, 4096) if self.size else 4096)
        except IOError:
            logger.exception("Failed reading first 4K bytes from volume.")
            return None

        # TODO fallback to img-cat image -s blocknum | file -
        # if we were able to load the module magic
        try:
            # noinspection PyUnresolvedReferences
            import magic

            if hasattr(magic, 'from_buffer'):
                # using https://github.com/ahupp/python-magic
                logger.debug("Using python-magic Python package for file type magic")
                result = magic.from_buffer(fheader).decode()
                self.info['magic_data'] = result
                return result

            elif hasattr(magic, 'open'):
                # using Magic file extensions by Rueben Thomas (Ubuntu python-magic module)
                logger.debug("Using python-magic system package for file type magic")
                ms = magic.open(magic.NONE)
                ms.load()
                result = ms.buffer(fheader)
                ms.close()
                self.info['magic_data'] = result
                return result

            else:
                logger.warning("The python-magic module is not available, but another module named magic was found.")

        except ImportError:
            logger.warning("The python-magic module is not available.")
        except AttributeError:
            logger.warning("The python-magic module is not available, but another module named magic was found.")
        return None
python
Checks the volume for its magic bytes and returns the magic.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume.py#L187-L228
ralphje/imagemounter
imagemounter/volume.py
Volume.get_raw_path
def get_raw_path(self, include_self=False):
        """Retrieves the base mount path of the volume. Typically equal to :func:`Disk.get_fs_path`, but may also
        be the path to a logical volume. This is used to determine the source path for a mount call.

        The value returned is normally based on the parent's paths, e.g. if this volume is mounted to a more
        specific path, only its children return the more specific path; this volume itself will keep returning the
        same path. This makes for consistent use of the offset attribute. If you do not need this behaviour, you
        can override this with the include_self argument.

        This behaviour, however, is not retained for paths that directly affect the volume itself, not the child
        volumes. This includes VSS stores and LV volumes.
        """
        v = self
        if not include_self:
            # lv / vss_store are exceptions, as they cover the volume itself, not the child volume
            if v._paths.get('lv'):
                return v._paths['lv']
            elif v._paths.get('vss_store'):
                return v._paths['vss_store']
            elif v.parent and v.parent != self.disk:
                v = v.parent
            else:
                return self.disk.get_fs_path()

        while True:
            if v._paths.get('lv'):
                return v._paths['lv']
            elif v._paths.get('bde'):
                return v._paths['bde'] + '/bde1'
            elif v._paths.get('luks'):
                return '/dev/mapper/' + v._paths['luks']
            elif v._paths.get('md'):
                return v._paths['md']
            elif v._paths.get('vss_store'):
                return v._paths['vss_store']

            # Only if the volume has a parent that is not a disk, we try to check the parent for a location.
            if v.parent and v.parent != self.disk:
                v = v.parent
            else:
                break
        return self.disk.get_fs_path()
python
Retrieves the base mount path of the volume. Typically equal to :func:`Disk.get_fs_path`, but may also be the path to a logical volume. This is used to determine the source path for a mount call.

The value returned is normally based on the parent's paths, e.g. if this volume is mounted to a more specific path, only its children return the more specific path; this volume itself will keep returning the same path. This makes for consistent use of the offset attribute. If you do not need this behaviour, you can override this with the include_self argument.

This behaviour, however, is not retained for paths that directly affect the volume itself, not the child volumes. This includes VSS stores and LV volumes.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume.py#L230-L272
ralphje/imagemounter
imagemounter/volume.py
Volume.get_safe_label
def get_safe_label(self):
        """Returns a label that is safe to add to a path in the mountpoint for this volume."""
        if self.info.get('label') == '/':
            return 'root'

        suffix = re.sub(r"[/ \(\)]+", "_", self.info.get('label')) if self.info.get('label') else ""
        if suffix and suffix[0] == '_':
            suffix = suffix[1:]
        if len(suffix) > 2 and suffix[-1] == '_':
            suffix = suffix[:-1]
        return suffix
python
Returns a label that is safe to add to a path in the mountpoint for this volume.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume.py#L274-L285
ralphje/imagemounter
imagemounter/volume.py
Volume.carve
def carve(self, freespace=True):
        """Call this method to carve the free space of the volume for (deleted) files. Note that photorec has its
        own interface that temporarily takes over the shell.

        :param freespace: indicates whether the entire volume should be carved (False) or only the free space (True)
        :type freespace: bool
        :return: string to the path where carved data is available
        :raises CommandNotFoundError: if the underlying command does not exist
        :raises SubsystemError: if the underlying command fails
        :raises NoMountpointAvailableError: if there is no mountpoint available
        :raises NoLoopbackAvailableError: if there is no loopback available (only when volume has no slot number)
        """
        self._make_mountpoint(var_name='carve', suffix="carve", in_paths=True)

        # if no slot, we need to make a loopback that we can use to carve the volume
        loopback_was_created_for_carving = False
        if not self.slot:
            if not self.loopback:
                self._find_loopback()
                # Can't carve if volume has no slot number and can't be mounted on loopback.
                loopback_was_created_for_carving = True

            # noinspection PyBroadException
            try:
                _util.check_call_(["photorec", "/d", self._paths['carve'] + os.sep, "/cmd", self.loopback,
                                   ("freespace," if freespace else "") + "search"])

                # clean out the loop device if we created it specifically for carving
                if loopback_was_created_for_carving:
                    # noinspection PyBroadException
                    try:
                        _util.check_call_(['losetup', '-d', self.loopback])
                    except Exception:
                        pass
                    else:
                        self.loopback = ""

                return self._paths['carve']
            except Exception as e:
                logger.exception("Failed carving the volume.")
                raise SubsystemError(e)
        else:
            # noinspection PyBroadException
            try:
                _util.check_call_(["photorec", "/d", self._paths['carve'] + os.sep, "/cmd", self.get_raw_path(),
                                   str(self.slot) + (",freespace" if freespace else "") + ",search"])
                return self._paths['carve']
            except Exception as e:
                logger.exception("Failed carving the volume.")
                raise SubsystemError(e)
python
Call this method to carve the free space of the volume for (deleted) files. Note that photorec has its own interface that temporarily takes over the shell.

:param freespace: indicates whether the entire volume should be carved (False) or only the free space (True)
:type freespace: bool
:return: string to the path where carved data is available
:raises CommandNotFoundError: if the underlying command does not exist
:raises SubsystemError: if the underlying command fails
:raises NoMountpointAvailableError: if there is no mountpoint available
:raises NoLoopbackAvailableError: if there is no loopback available (only when volume has no slot number)
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume.py#L288-L339
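A hedged usage sketch for carving; photorec must be installed, the loop assumes the documented parser API, and the image path is illustrative:

from imagemounter import ImageParser
from imagemounter.exceptions import ImageMounterError

parser = ImageParser(['/tmp/image.dd'])
for volume in parser.init():
    if volume.flag == 'alloc':
        try:
            print("carved files in", volume.carve(freespace=True))
        except ImageMounterError:
            pass  # e.g. no loopback or mountpoint available
parser.clean()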
ralphje/imagemounter
imagemounter/volume.py
Volume.detect_volume_shadow_copies
def detect_volume_shadow_copies(self):
        """Method to call vshadowmount and mount NTFS volume shadow copies.

        :return: iterable with the :class:`Volume` objects of the VSS
        :raises CommandNotFoundError: if the underlying command does not exist
        :raises SubsystemError: if the underlying command fails
        :raises NoMountpointAvailableError: if there is no mountpoint available
        """
        self._make_mountpoint(var_name='vss', suffix="vss", in_paths=True)

        try:
            _util.check_call_(["vshadowmount", "-o", str(self.offset), self.get_raw_path(), self._paths['vss']])
        except Exception as e:
            logger.exception("Failed mounting the volume shadow copies.")
            raise SubsystemError(e)
        else:
            return self.volumes.detect_volumes(vstype='vss')
python
Method to call vshadowmount and mount NTFS volume shadow copies.

:return: iterable with the :class:`Volume` objects of the VSS
:raises CommandNotFoundError: if the underlying command does not exist
:raises SubsystemError: if the underlying command fails
:raises NoMountpointAvailableError: if there is no mountpoint available
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume.py#L342-L359
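A minimal sketch of exposing shadow copies on an NTFS volume; `ntfs_volume` stands in for a Volume obtained from a parser and is hypothetical:

for store in ntfs_volume.detect_volume_shadow_copies():
    store.init_volume()   # mounts each 'VSS Store' subvolume
    print(store.index, store.info.get('creation_time'), store.mountpoint)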
ralphje/imagemounter
imagemounter/volume.py
Volume._should_mount
def _should_mount(self, only_mount=None, skip_mount=None):
        """Indicates whether this volume should be mounted. Internal method, used by imount.py"""
        om = only_mount is None or \
            self.index in only_mount or \
            self.info.get('lastmountpoint') in only_mount or \
            self.info.get('label') in only_mount
        sm = skip_mount is None or \
            (self.index not in skip_mount
             and self.info.get('lastmountpoint') not in skip_mount
             and self.info.get('label') not in skip_mount)
        return om and sm
python
Indicates whether this volume should be mounted. Internal method, used by imount.py
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume.py#L361-L372
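To illustrate the filter semantics (calling a private method, so for exposition only): a volume passes when it matches only_mount by index, last mount point or label, and does not match skip_mount by any of the same keys:

# hypothetical values, for exposition only
volume._should_mount(only_mount=['2', '/boot'])   # True for index '2' or a /boot volume
volume._should_mount(skip_mount=['swap'])         # False if this volume's label is 'swap'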
ralphje/imagemounter
imagemounter/volume.py
Volume.init
def init(self, only_mount=None, skip_mount=None, swallow_exceptions=True):
        """Generator that mounts this volume and either yields itself or recursively generates its subvolumes.

        More specifically, this function will call :func:`load_fsstat_data` (iff *no_stats* is False), followed by
        :func:`mount`, followed by a call to :func:`detect_mountpoint`, after which ``self`` is yielded, or the
        result of the :func:`init` call on each subvolume is yielded.

        :param only_mount: if specified, only volume indexes in this list are mounted. Volume indexes are strings.
        :param skip_mount: if specified, volume indexes in this list are not mounted.
        :param swallow_exceptions: if True, any error occurring when mounting the volume is swallowed and added as
                                   an exception attribute to the yielded objects.
        """
        if swallow_exceptions:
            self.exception = None

        try:
            if not self._should_mount(only_mount, skip_mount):
                yield self
                return

            if not self.init_volume():
                yield self
                return

        except ImageMounterError as e:
            if swallow_exceptions:
                self.exception = e
            else:
                raise

        if not self.volumes:
            yield self
        else:
            for v in self.volumes:
                for s in v.init(only_mount, skip_mount, swallow_exceptions):
                    yield s
python
Generator that mounts this volume and either yields itself or recursively generates its subvolumes.

More specifically, this function will call :func:`load_fsstat_data` (iff *no_stats* is False), followed by :func:`mount`, followed by a call to :func:`detect_mountpoint`, after which ``self`` is yielded, or the result of the :func:`init` call on each subvolume is yielded.

:param only_mount: if specified, only volume indexes in this list are mounted. Volume indexes are strings.
:param skip_mount: if specified, volume indexes in this list are not mounted.
:param swallow_exceptions: if True, any error occurring when mounting the volume is swallowed and added as an exception attribute to the yielded objects.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume.py#L374-L409
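A hedged sketch of the generator contract described above; `volume` is a hypothetical mounted Volume:

for v in volume.init(skip_mount=['swap']):
    if v.exception is not None:      # swallowed mount error (swallow_exceptions=True)
        print(v.index, "failed:", v.exception)
    elif v.mountpoint:
        print(v.index, "mounted at", v.mountpoint)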
ralphje/imagemounter
imagemounter/volume.py
Volume.init_volume
def init_volume(self, fstype=None):
        """Initializes a single volume. You should use this method instead of :func:`mount` if you want some sane
        checks before mounting.
        """
        logger.debug("Initializing volume {0}".format(self))

        if not self._should_mount():
            return False

        if self.flag != 'alloc':
            return False

        if self.info.get('raid_status') == 'waiting':
            logger.info("RAID array %s not ready for mounting", self)
            return False

        if self.is_mounted:
            logger.info("%s is currently mounted, not mounting it again", self)
            return False

        logger.info("Mounting volume {0}".format(self))
        self.mount(fstype=fstype)
        self.detect_mountpoint()
        return True
python
Initializes a single volume. You should use this method instead of :func:`mount` if you want some sane checks before mounting.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume.py#L411-L436
ralphje/imagemounter
imagemounter/volume.py
Volume._make_mountpoint
def _make_mountpoint(self, casename=None, var_name='mountpoint', suffix='', in_paths=False):
        """Creates a directory that can be used as a mountpoint. The directory is stored in :attr:`mountpoint`,
        or the varname as specified by the argument. If in_paths is True, the path is stored in the :attr:`_paths`
        attribute instead.

        :returns: the mountpoint path
        :raises NoMountpointAvailableError: if no mountpoint could be made
        """
        parser = self.disk.parser

        if parser.mountdir and not os.path.exists(parser.mountdir):
            os.makedirs(parser.mountdir)

        if parser.pretty:
            md = parser.mountdir or tempfile.gettempdir()
            case_name = casename or self.disk.parser.casename or \
                ".".join(os.path.basename(self.disk.paths[0]).split('.')[0:-1]) or \
                os.path.basename(self.disk.paths[0])

            if self.disk.parser.casename == case_name:  # the casename is already in the path in this case
                pretty_label = "{0}-{1}".format(self.index, self.get_safe_label() or self.fstype or 'volume')
            else:
                pretty_label = "{0}-{1}-{2}".format(case_name, self.index,
                                                    self.get_safe_label() or self.fstype or 'volume')
            if suffix:
                pretty_label += "-" + suffix
            path = os.path.join(md, pretty_label)

            # check if path already exists, otherwise try to find another nice path
            if os.path.exists(path):
                for i in range(2, 100):
                    path = os.path.join(md, pretty_label + "-" + str(i))
                    if not os.path.exists(path):
                        break
                else:
                    logger.error("Could not find free mountdir.")
                    raise NoMountpointAvailableError()

            # noinspection PyBroadException
            try:
                os.mkdir(path, 0o777)  # the mode must be octal; the original decimal literal 777 was a bug
                if in_paths:
                    self._paths[var_name] = path
                else:
                    setattr(self, var_name, path)
                return path
            except Exception:
                logger.exception("Could not create mountdir.")
                raise NoMountpointAvailableError()
        else:
            t = tempfile.mkdtemp(prefix='im_' + self.index + '_',
                                 suffix='_' + self.get_safe_label() + ("_" + suffix if suffix else ""),
                                 dir=parser.mountdir)
            if in_paths:
                self._paths[var_name] = t
            else:
                setattr(self, var_name, t)
            return t
python
Creates a directory that can be used as a mountpoint. The directory is stored in :attr:`mountpoint`, or the varname as specified by the argument. If in_paths is True, the path is stored in the :attr:`_paths` attribute instead.

:returns: the mountpoint path
:raises NoMountpointAvailableError: if no mountpoint could be made
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume.py#L438-L495
ralphje/imagemounter
imagemounter/volume.py
Volume._clear_mountpoint
def _clear_mountpoint(self):
        """Clears a created mountpoint. Does not unmount it, merely deletes it."""
        if self.mountpoint:
            os.rmdir(self.mountpoint)
            self.mountpoint = ""
python
Clears a created mountpoint. Does not unmount it, merely deletes it.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume.py#L497-L502
ralphje/imagemounter
imagemounter/volume.py
Volume._find_loopback
def _find_loopback(self, use_loopback=True, var_name='loopback'):
        """Finds a free loopback device that can be used. The loopback is stored in :attr:`loopback`. If
        *use_loopback* is True, the loopback will also be used directly.

        :returns: the loopback address
        :raises NoLoopbackAvailableError: if no loopback could be found
        """
        # noinspection PyBroadException
        try:
            loopback = _util.check_output_(['losetup', '-f']).strip()
            setattr(self, var_name, loopback)
        except Exception:
            logger.warning("No free loopback device found.", exc_info=True)
            raise NoLoopbackAvailableError()

        # noinspection PyBroadException
        if use_loopback:
            try:
                cmd = ['losetup', '-o', str(self.offset), '--sizelimit', str(self.size),
                       loopback, self.get_raw_path()]
                if not self.disk.read_write:
                    cmd.insert(1, '-r')
                _util.check_call_(cmd, stdout=subprocess.PIPE)
            except Exception:
                logger.exception("Loopback device could not be mounted.")
                raise NoLoopbackAvailableError()
        return loopback
python
Finds a free loopback device that can be used. The loopback is stored in :attr:`loopback`. If *use_loopback* is True, the loopback will also be used directly.

:returns: the loopback address
:raises NoLoopbackAvailableError: if no loopback could be found
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume.py#L504-L531
ralphje/imagemounter
imagemounter/volume.py
Volume.determine_fs_type
def determine_fs_type(self):
        """Determines the FS type for this partition. This function is used internally to determine which mount
        system to use, based on the file system description. Return values include *ext*, *ufs*, *ntfs*, *lvm*
        and *luks*.

        Note: does not do anything if fstype is already set to something sensible.
        """
        fstype_fallback = None
        if isinstance(self.fstype, filesystems.FallbackFileSystemType):
            fstype_fallback = self.fstype.fallback
        elif isinstance(self.fstype, filesystems.FileSystemType):
            return self.fstype

        result = collections.Counter()

        for source, description in (('fsdescription', self.info.get('fsdescription')),
                                    ('guid', self.info.get('guid')),
                                    ('blkid', self._get_blkid_type),  # key was misspelled 'blikid'; used in logging only
                                    ('magic', self._get_magic_type)):
            # For efficiency reasons, not all functions are called instantly.
            if callable(description):
                description = description()

            logger.debug("Trying to determine fs type from {} '{}'".format(source, description))
            if not description:
                continue

            # Iterate over all results and update the certainty of all FS types
            for type in FILE_SYSTEM_TYPES.values():
                result.update(type.detect(source, description))

            # Now sort the results by their certainty
            logger.debug("Current certainty levels: {}".format(result))

            # If we have not found any candidates, we continue
            if not result:
                continue

            # If we have candidates of which we are not entirely certain, we just continue
            max_res = result.most_common(1)[0][1]
            if max_res < 50:
                logger.debug("Highest certainty item is lower than 50, continuing...")
            # If we have multiple candidates with the same score, we just continue
            elif len([True for type, certainty in result.items() if certainty == max_res]) > 1:
                logger.debug("Multiple items with highest certainty level, so continuing...")
            else:
                self.fstype = result.most_common(1)[0][0]
                return self.fstype

        # Now be more lax with the fallback:
        if result:
            max_res = result.most_common(1)[0][1]
            if max_res > 0:
                self.fstype = result.most_common(1)[0][0]
                return self.fstype

        if fstype_fallback:
            self.fstype = fstype_fallback
        return self.fstype
python
Determines the FS type for this partition. This function is used internally to determine which mount system to use, based on the file system description. Return values include *ext*, *ufs*, *ntfs*, *lvm* and *luks*. Note: does not do anything if fstype is already set to something sensible.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume.py#L538-L595
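A toy illustration of the certainty voting above, using only collections.Counter rather than the imagemounter API; the scores are made up:

import collections

result = collections.Counter()
result.update({'ntfs': 40})                # e.g. a hint from 'fsdescription'
result.update({'ntfs': 60, 'exfat': 10})   # e.g. a hint from blkid
fstype, certainty = result.most_common(1)[0]
# ('ntfs', 100): accepted, since a single type clears 50 without a tie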
ralphje/imagemounter
imagemounter/volume.py
Volume.mount
def mount(self, fstype=None):
        """Based on the file system type as determined by :func:`determine_fs_type`, the proper mount command is
        executed for this volume. The volume is mounted in a temporary path (or a pretty path if :attr:`pretty` is
        enabled) in the mountpoint as specified by :attr:`mountpoint`.

        If the file system type is a LUKS container or LVM, additional methods may be called, adding subvolumes to
        :attr:`volumes`.

        :raises NotMountedError: if the parent volume/disk is not mounted
        :raises NoMountpointAvailableError: if no mountpoint was found
        :raises NoLoopbackAvailableError: if no loopback device was found
        :raises UnsupportedFilesystemError: if the fstype is not supported for mounting
        :raises SubsystemError: if one of the underlying commands failed
        """
        if not self.parent.is_mounted:
            raise NotMountedError(self.parent)

        if fstype is None:
            fstype = self.determine_fs_type()
        self._load_fsstat_data()

        # Prepare mount command
        try:
            fstype.mount(self)

            self.was_mounted = True
            self.is_mounted = True
            self.fstype = fstype
        except Exception as e:
            logger.exception("Execution failed due to {} {}".format(type(e), e), exc_info=True)
            if not isinstance(e, ImageMounterError):
                raise SubsystemError(e)
            else:
                raise
python
Based on the file system type as determined by :func:`determine_fs_type`, the proper mount command is executed for this volume. The volume is mounted in a temporary path (or a pretty path if :attr:`pretty` is enabled) in the mountpoint as specified by :attr:`mountpoint`.

If the file system type is a LUKS container or LVM, additional methods may be called, adding subvolumes to :attr:`volumes`.

:raises NotMountedError: if the parent volume/disk is not mounted
:raises NoMountpointAvailableError: if no mountpoint was found
:raises NoLoopbackAvailableError: if no loopback device was found
:raises UnsupportedFilesystemError: if the fstype is not supported for mounting
:raises SubsystemError: if one of the underlying commands failed
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume.py#L597-L632
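A hedged sketch of mounting a single volume explicitly rather than through the :func:`init` generator; `volume` is hypothetical, and the error names follow the docstring above:

from imagemounter.exceptions import ImageMounterError

try:
    volume.init_volume()           # sanity checks, then mount() and detect_mountpoint()
except ImageMounterError as e:     # e.g. NotMountedError, SubsystemError
    print("could not mount:", e)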
ralphje/imagemounter
imagemounter/volume.py
Volume.bindmount
def bindmount(self, mountpoint):
        """Bind mounts the volume to another mountpoint. Only works if the volume is already mounted.

        :raises NotMountedError: when the volume is not yet mounted
        :raises SubsystemError: when the underlying command failed
        """
        if not self.mountpoint:
            raise NotMountedError(self)
        try:
            _util.check_call_(['mount', '--bind', self.mountpoint, mountpoint], stdout=subprocess.PIPE)
            if 'bindmounts' in self._paths:
                self._paths['bindmounts'].append(mountpoint)
            else:
                self._paths['bindmounts'] = [mountpoint]
            return True
        except Exception as e:
            logger.exception("Error bind mounting {0}.".format(self))
            raise SubsystemError(e)
python
Bind mounts the volume to another mountpoint. Only works if the volume is already mounted.

:raises NotMountedError: when the volume is not yet mounted
:raises SubsystemError: when the underlying command failed
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume.py#L634-L652
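A minimal usage sketch; the target path is an assumption and must already exist:

volume.bindmount('/mnt/evidence/root')   # mirrors volume.mountpoint at the target
# unmount() later tears down the entries recorded in _paths['bindmounts']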
ralphje/imagemounter
imagemounter/volume.py
Volume.get_volumes
def get_volumes(self):
        """Recursively gets a list of all subvolumes and the current volume."""
        if self.volumes:
            volumes = []
            for v in self.volumes:
                volumes.extend(v.get_volumes())
            volumes.append(self)
            return volumes
        else:
            return [self]
python
Recursively gets a list of all subvolumes and the current volume.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume.py#L654-L664
ralphje/imagemounter
imagemounter/volume.py
Volume._load_fsstat_data
def _load_fsstat_data(self, timeout=3):
        """Using :command:`fsstat`, adds some additional information of the volume to the Volume."""
        def stats_thread():
            try:
                cmd = ['fsstat', self.get_raw_path(), '-o', str(self.offset // self.disk.block_size)]
                # Setting the fstype explicitly makes fsstat much faster and more reliable
                # In some versions, the auto-detect yaffs2 check takes ages for large images
                fstype = {
                    "ntfs": "ntfs", "fat": "fat", "ext": "ext", "iso": "iso9660",
                    "hfs+": "hfs", "ufs": "ufs", "swap": "swap", "exfat": "exfat",
                }.get(self.fstype, None)
                if fstype:
                    cmd.extend(["-f", fstype])
                logger.debug('$ {0}'.format(' '.join(cmd)))
                stats_thread.process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

                for line in iter(stats_thread.process.stdout.readline, b''):
                    line = line.decode('utf-8')
                    logger.debug('< {0}'.format(line))
                    if line.startswith("File System Type:"):
                        self.info['statfstype'] = line[line.index(':') + 2:].strip()
                    elif line.startswith("Last Mount Point:") or line.startswith("Last mounted on:"):
                        self.info['lastmountpoint'] = line[line.index(':') + 2:].strip().replace("//", "/")
                    elif line.startswith("Volume Name:") and not self.info.get('label'):
                        self.info['label'] = line[line.index(':') + 2:].strip()
                    elif line.startswith("Version:"):
                        self.info['version'] = line[line.index(':') + 2:].strip()
                    elif line.startswith("Source OS:"):
                        self.info['version'] = line[line.index(':') + 2:].strip()
                    elif 'CYLINDER GROUP INFORMATION' in line or 'BLOCK GROUP INFORMATION' in line:
                        # noinspection PyBroadException
                        try:
                            stats_thread.process.terminate()
                            logger.debug("Terminated fsstat at cylinder/block group information.")
                        except Exception:
                            pass
                        break

                if self.info.get('lastmountpoint') and self.info.get('label'):
                    self.info['label'] = "{0} ({1})".format(self.info['lastmountpoint'], self.info['label'])
                elif self.info.get('lastmountpoint') and not self.info.get('label'):
                    self.info['label'] = self.info['lastmountpoint']
                elif not self.info.get('lastmountpoint') and self.info.get('label') and \
                        self.info['label'].startswith("/"):  # e.g. /boot1
                    if self.info['label'].endswith("1"):
                        self.info['lastmountpoint'] = self.info['label'][:-1]
                    else:
                        self.info['lastmountpoint'] = self.info['label']

            except Exception:
                # ignore any exceptions here.
                logger.exception("Error while obtaining stats.")

        stats_thread.process = None
        thread = threading.Thread(target=stats_thread)
        thread.start()

        thread.join(timeout)
        if thread.is_alive():
            # noinspection PyBroadException
            try:
                stats_thread.process.terminate()
            except Exception:
                pass
            thread.join()
            logger.debug("Killed fsstat after {0}s".format(timeout))
python
Using :command:`fsstat`, adds some additional information of the volume to the Volume.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume.py#L667-L734
ralphje/imagemounter
imagemounter/volume.py
Volume.detect_mountpoint
def detect_mountpoint(self):
        """Attempts to detect the previous mountpoint if this was not done through :func:`load_fsstat_data`.
        This detection applies some heuristics to the contents of the mounted volume.
        """
        if self.info.get('lastmountpoint'):
            return self.info.get('lastmountpoint')
        if not self.mountpoint:
            return None

        result = None
        paths = os.listdir(self.mountpoint)
        if 'grub' in paths:
            result = '/boot'
        elif 'usr' in paths and 'var' in paths and 'root' in paths:
            result = '/'
        elif 'bin' in paths and 'lib' in paths and 'local' in paths and 'src' in paths and 'usr' not in paths:
            result = '/usr'
        elif 'bin' in paths and 'lib' in paths and 'local' not in paths and 'src' in paths and 'usr' not in paths:
            result = '/usr/local'
        elif 'lib' in paths and 'local' in paths and 'tmp' in paths and 'var' not in paths:
            result = '/var'
        # elif sum(['bin' in paths, 'boot' in paths, 'cdrom' in paths, 'dev' in paths, 'etc' in paths, 'home' in paths,
        #           'lib' in paths, 'lib64' in paths, 'media' in paths, 'mnt' in paths, 'opt' in paths,
        #           'proc' in paths, 'root' in paths, 'sbin' in paths, 'srv' in paths, 'sys' in paths, 'tmp' in paths,
        #           'usr' in paths, 'var' in paths]) > 11:
        #     result = '/'

        if result:
            self.info['lastmountpoint'] = result
            if not self.info.get('label'):
                self.info['label'] = self.info['lastmountpoint']
            logger.info("Detected mountpoint as {0} based on files in volume".format(self.info['lastmountpoint']))

        return result
python
Attempts to detect the previous mountpoint if this was not done through :func:`load_fsstat_data`. This detection applies some heuristics to the mounted volume.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume.py#L736-L770
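A rough standalone sketch of the directory-listing heuristic above; the listings are made-up examples:

def guess_mountpoint(paths):
    # simplified subset of the heuristics in Volume.detect_mountpoint
    if 'grub' in paths:
        return '/boot'
    if 'usr' in paths and 'var' in paths and 'root' in paths:
        return '/'
    if 'lib' in paths and 'local' in paths and 'tmp' in paths and 'var' not in paths:
        return '/var'
    return None

print(guess_mountpoint(['grub', 'vmlinuz', 'initrd.img']))  # /boot
print(guess_mountpoint(['usr', 'var', 'root', 'etc']))      # /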
ralphje/imagemounter
imagemounter/volume.py
Volume.unmount
def unmount(self, allow_lazy=False):
        """Unmounts the volume from the filesystem.

        :raises SubsystemError: if one of the underlying processes fails
        :raises CleanupError: if the cleanup fails
        """

        for volume in self.volumes:
            try:
                volume.unmount(allow_lazy=allow_lazy)
            except ImageMounterError:
                pass

        if self.is_mounted:
            logger.info("Unmounting volume %s", self)

        if self.loopback and self.info.get('volume_group'):
            _util.check_call_(["lvm", 'vgchange', '-a', 'n', self.info['volume_group']],
                              wrap_error=True, stdout=subprocess.PIPE)
            self.info['volume_group'] = ""

        if self.loopback and self._paths.get('luks'):
            _util.check_call_(['cryptsetup', 'luksClose', self._paths['luks']],
                              wrap_error=True, stdout=subprocess.PIPE)
            del self._paths['luks']

        if self._paths.get('bde'):
            try:
                _util.clean_unmount(['fusermount', '-u'], self._paths['bde'])
            except SubsystemError:
                if not allow_lazy:
                    raise
                _util.clean_unmount(['fusermount', '-uz'], self._paths['bde'])
            del self._paths['bde']

        if self._paths.get('md'):
            md_path = self._paths['md']
            del self._paths['md']  # removing it here to ensure we do not enter an infinite loop, will add it back later

            # MD arrays are a bit complicated, we also check all other volumes that are part of this array and
            # unmount them as well.
            logger.debug("All other volumes that use %s as well will also be unmounted", md_path)
            for v in self.disk.get_volumes():
                if v != self and v._paths.get('md') == md_path:
                    v.unmount(allow_lazy=allow_lazy)

            try:
                _util.check_output_(["mdadm", '--stop', md_path], stderr=subprocess.STDOUT)
            except Exception as e:
                self._paths['md'] = md_path
                raise SubsystemError(e)

        if self._paths.get('vss'):
            try:
                _util.clean_unmount(['fusermount', '-u'], self._paths['vss'])
            except SubsystemError:
                if not allow_lazy:
                    raise
                _util.clean_unmount(['fusermount', '-uz'], self._paths['vss'])
            del self._paths['vss']

        if self.loopback:
            _util.check_call_(['losetup', '-d', self.loopback], wrap_error=True)
            self.loopback = ""

        if self._paths.get('bindmounts'):
            for mp in self._paths['bindmounts']:
                _util.clean_unmount(['umount'], mp, rmdir=False)
            del self._paths['bindmounts']

        if self.mountpoint:
            _util.clean_unmount(['umount'], self.mountpoint)
            self.mountpoint = ""

        if self._paths.get('carve'):
            try:
                shutil.rmtree(self._paths['carve'])
            except OSError as e:
                raise SubsystemError(e)
            else:
                del self._paths['carve']

        self.is_mounted = False
python
def unmount(self, allow_lazy=False):
        """Unmounts the volume from the filesystem.

        :raises SubsystemError: if one of the underlying processes fails
        :raises CleanupError: if the cleanup fails
        """

        for volume in self.volumes:
            try:
                volume.unmount(allow_lazy=allow_lazy)
            except ImageMounterError:
                pass

        if self.is_mounted:
            logger.info("Unmounting volume %s", self)

        if self.loopback and self.info.get('volume_group'):
            _util.check_call_(["lvm", 'vgchange', '-a', 'n', self.info['volume_group']],
                              wrap_error=True, stdout=subprocess.PIPE)
            self.info['volume_group'] = ""

        if self.loopback and self._paths.get('luks'):
            _util.check_call_(['cryptsetup', 'luksClose', self._paths['luks']],
                              wrap_error=True, stdout=subprocess.PIPE)
            del self._paths['luks']

        if self._paths.get('bde'):
            try:
                _util.clean_unmount(['fusermount', '-u'], self._paths['bde'])
            except SubsystemError:
                if not allow_lazy:
                    raise
                _util.clean_unmount(['fusermount', '-uz'], self._paths['bde'])
            del self._paths['bde']

        if self._paths.get('md'):
            md_path = self._paths['md']
            del self._paths['md']  # removing it here to ensure we do not enter an infinite loop, will add it back later

            # MD arrays are a bit complicated, we also check all other volumes that are part of this array and
            # unmount them as well.
            logger.debug("All other volumes that use %s as well will also be unmounted", md_path)
            for v in self.disk.get_volumes():
                if v != self and v._paths.get('md') == md_path:
                    v.unmount(allow_lazy=allow_lazy)

            try:
                _util.check_output_(["mdadm", '--stop', md_path], stderr=subprocess.STDOUT)
            except Exception as e:
                self._paths['md'] = md_path
                raise SubsystemError(e)

        if self._paths.get('vss'):
            try:
                _util.clean_unmount(['fusermount', '-u'], self._paths['vss'])
            except SubsystemError:
                if not allow_lazy:
                    raise
                _util.clean_unmount(['fusermount', '-uz'], self._paths['vss'])
            del self._paths['vss']

        if self.loopback:
            _util.check_call_(['losetup', '-d', self.loopback], wrap_error=True)
            self.loopback = ""

        if self._paths.get('bindmounts'):
            for mp in self._paths['bindmounts']:
                _util.clean_unmount(['umount'], mp, rmdir=False)
            del self._paths['bindmounts']

        if self.mountpoint:
            _util.clean_unmount(['umount'], self.mountpoint)
            self.mountpoint = ""

        if self._paths.get('carve'):
            try:
                shutil.rmtree(self._paths['carve'])
            except OSError as e:
                raise SubsystemError(e)
            else:
                del self._paths['carve']

        self.is_mounted = False
Unmounts the volume from the filesystem. :raises SubsystemError: if one of the underlying processes fails :raises CleanupError: if the cleanup fails
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume.py#L773-L854
ralphje/imagemounter
imagemounter/dependencies.py
require
def require(*requirements, **kwargs): """Decorator that can be used to require requirements. :param requirements: List of requirements that should be verified :param none_on_failure: If true, does not raise a PrerequisiteFailedError, but instead returns None """ # TODO: require(*requirements, none_on_failure=False) is not supported by Python 2 none_on_failure = kwargs.get('none_on_failure', False) def inner(f): @functools.wraps(f) def wrapper(*args, **kwargs): for req in requirements: if none_on_failure: if not getattr(req, 'is_available'): return None else: getattr(req, 'require')() return f(*args, **kwargs) return wrapper return inner
python
def require(*requirements, **kwargs): """Decorator that can be used to require requirements. :param requirements: List of requirements that should be verified :param none_on_failure: If true, does not raise a PrerequisiteFailedError, but instead returns None """ # TODO: require(*requirements, none_on_failure=False) is not supported by Python 2 none_on_failure = kwargs.get('none_on_failure', False) def inner(f): @functools.wraps(f) def wrapper(*args, **kwargs): for req in requirements: if none_on_failure: if not getattr(req, 'is_available'): return None else: getattr(req, 'require')() return f(*args, **kwargs) return wrapper return inner
Decorator that can be used to require requirements. :param requirements: List of requirements that should be verified :param none_on_failure: If true, does not raise a PrerequisiteFailedError, but instead returns None
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/dependencies.py#L8-L28
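A minimal usage sketch of the require decorator above; FakeDependency is an invented stand-in exposing only the two attributes the decorator touches:

class FakeDependency(object):
    """Stand-in carrying the attributes the decorator relies on."""
    is_available = False

    def require(self):
        raise RuntimeError("dependency missing")

@require(FakeDependency(), none_on_failure=True)
def do_work():
    return "worked"

print(do_work())  # None, because the fake dependency reports unavailable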
ralphje/imagemounter
imagemounter/dependencies.py
CommandDependency.status_message
def status_message(self): """Detailed message about whether the dependency is installed. :rtype: str """ if self.is_available: return "INSTALLED {0!s}" elif self.why and self.package: return "MISSING {0!s:<20}needed for {0.why}, part of the {0.package} package" elif self.why: return "MISSING {0!s:<20}needed for {0.why}" elif self.package: return "MISSING {0!s:<20}part of the {0.package} package" else: return "MISSING {0!s:<20}"
python
def status_message(self): """Detailed message about whether the dependency is installed. :rtype: str """ if self.is_available: return "INSTALLED {0!s}" elif self.why and self.package: return "MISSING {0!s:<20}needed for {0.why}, part of the {0.package} package" elif self.why: return "MISSING {0!s:<20}needed for {0.why}" elif self.package: return "MISSING {0!s:<20}part of the {0.package} package" else: return "MISSING {0!s:<20}"
Detailed message about whether the dependency is installed. :rtype: str
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/dependencies.py#L97-L111
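The returned string is a format template that is later applied to the dependency object itself (the real module does this via a helper; treat the exact mechanism as an assumption). A standalone sketch of the pattern:

class Dep(object):
    # minimal stand-in carrying the attributes the template references
    def __init__(self, name, why=None, package=None):
        self.name, self.why, self.package = name, why, package

    def __str__(self):
        return self.name

dep = Dep("mdadm", why="RAID volumes", package="mdadm")
template = "MISSING   {0!s:<20}needed for {0.why}, part of the {0.package} package"
print(template.format(dep))
# MISSING   mdadm               needed for RAID volumes, part of the mdadm package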
ralphje/imagemounter
imagemounter/filesystems.py
FileSystemType.detect
def detect(self, source, description):
        """Detects the type of a volume based on the provided information. It returns the plausibility for all
        file system types as a dict. Although it is only responsible for returning its own plausibility, it is
        possible that one type of filesystem is more likely than another, e.g. when NTFS detects it is likely to be
        NTFS, it can also update the plausibility of exFAT to indicate it is less likely.

        All scores are cumulative. When multiple sources are used, it is also cumulative. For instance, if run 1 is
        25 certain, and run 2 is 25 certain as well, it will become 50 certain.

        :meth:`Volume.detect_fs_type` will return immediately if the score is higher than 50 and there is only 1
        FS type with the highest score. Otherwise, it will continue with the next run. If at the end of all runs no
        viable FS type was found, it will return the highest scoring FS type (if it is > 0), otherwise it will
        return the FS type fallback.

        :param source: The source of the description
        :param description: The description to detect with
        :return: Dict with mapping of FsType() objects to scores
        """

        if source == "guid" and description in self.guids:
            return {self: 100}

        description = description.lower()
        if description == self.type:
            return {self: 100}
        elif re.search(r"\b" + self.type + r"\b", description):
            return {self: 80}
        elif any((re.search(r"\b" + alias + r"\b", description) for alias in self.aliases)):
            return {self: 70}
        return {}
python
def detect(self, source, description):
        """Detects the type of a volume based on the provided information. It returns the plausibility for all
        file system types as a dict. Although it is only responsible for returning its own plausibility, it is
        possible that one type of filesystem is more likely than another, e.g. when NTFS detects it is likely to be
        NTFS, it can also update the plausibility of exFAT to indicate it is less likely.

        All scores are cumulative. When multiple sources are used, it is also cumulative. For instance, if run 1 is
        25 certain, and run 2 is 25 certain as well, it will become 50 certain.

        :meth:`Volume.detect_fs_type` will return immediately if the score is higher than 50 and there is only 1
        FS type with the highest score. Otherwise, it will continue with the next run. If at the end of all runs no
        viable FS type was found, it will return the highest scoring FS type (if it is > 0), otherwise it will
        return the FS type fallback.

        :param source: The source of the description
        :param description: The description to detect with
        :return: Dict with mapping of FsType() objects to scores
        """

        if source == "guid" and description in self.guids:
            return {self: 100}

        description = description.lower()
        if description == self.type:
            return {self: 100}
        elif re.search(r"\b" + self.type + r"\b", description):
            return {self: 80}
        elif any((re.search(r"\b" + alias + r"\b", description) for alias in self.aliases)):
            return {self: 70}
        return {}
Detects the type of a volume based on the provided information. It returns the plausibility for all file system types as a dict. Although it is only responsible for returning its own plausibility, it is possible that one type of filesystem is more likely than another, e.g. when NTFS detects it is likely to be NTFS, it can also update the plausibility of exFAT to indicate it is less likely. All scores are cumulative. When multiple sources are used, it is also cumulative. For instance, if run 1 is 25 certain, and run 2 is 25 certain as well, it will become 50 certain. :meth:`Volume.detect_fs_type` will return immediately if the score is higher than 50 and there is only 1 FS type with the highest score. Otherwise, it will continue with the next run. If at the end of all runs no viable FS type was found, it will return the highest scoring FS type (if it is > 0), otherwise it will return the FS type fallback. :param source: The source of the description :param description: The description to detect with :return: Dict with mapping of FsType() objects to scores
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/filesystems.py#L36-L65
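A standalone sketch of how such per-detector score dicts can be accumulated across runs, as the docstring describes; plain strings stand in for the FsType objects here:

from collections import Counter

def accumulate(*score_dicts):
    # scores are cumulative across detection runs/sources
    total = Counter()
    for scores in score_dicts:
        total.update(scores)
    return dict(total)

run1 = {'ntfs': 25, 'exfat': 10}
run2 = {'ntfs': 25}
print(accumulate(run1, run2))  # {'ntfs': 50, 'exfat': 10}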
ralphje/imagemounter
imagemounter/filesystems.py
FileSystemType.mount
def mount(self, volume):
        """Mounts the given volume on a newly created mountpoint. The default implementation simply calls mount.

        :param Volume volume: The volume to be mounted
        :raises UnsupportedFilesystemError: when the volume system type cannot be mounted
        """

        volume._make_mountpoint()
        try:
            self._call_mount(volume, volume.mountpoint, self._mount_type or self.type, self._mount_opts)
        except Exception:
            # undo the creation of the mountpoint
            volume._clear_mountpoint()
            raise
python
def mount(self, volume):
        """Mounts the given volume on a newly created mountpoint. The default implementation simply calls mount.

        :param Volume volume: The volume to be mounted
        :raises UnsupportedFilesystemError: when the volume system type cannot be mounted
        """

        volume._make_mountpoint()
        try:
            self._call_mount(volume, volume.mountpoint, self._mount_type or self.type, self._mount_opts)
        except Exception:
            # undo the creation of the mountpoint
            volume._clear_mountpoint()
            raise
Mounts the given volume on a newly created mountpoint. The default implementation simply calls mount. :param Volume volume: The volume to be mounted :raises UnsupportedFilesystemError: when the volume system type cannot be mounted
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/filesystems.py#L67-L81
ralphje/imagemounter
imagemounter/filesystems.py
FileSystemType._call_mount
def _call_mount(self, volume, mountpoint, type=None, opts=""): """Calls the mount command, specifying the mount type and mount options.""" # default arguments for calling mount if opts and not opts.endswith(','): opts += "," opts += 'loop,offset=' + str(volume.offset) + ',sizelimit=' + str(volume.size) # building the command cmd = ['mount', volume.get_raw_path(), mountpoint, '-o', opts] # add read-only if needed if not volume.disk.read_write: cmd[-1] += ',ro' # add the type if specified if type is not None: cmd += ['-t', type] _util.check_output_(cmd, stderr=subprocess.STDOUT)
python
def _call_mount(self, volume, mountpoint, type=None, opts=""): """Calls the mount command, specifying the mount type and mount options.""" # default arguments for calling mount if opts and not opts.endswith(','): opts += "," opts += 'loop,offset=' + str(volume.offset) + ',sizelimit=' + str(volume.size) # building the command cmd = ['mount', volume.get_raw_path(), mountpoint, '-o', opts] # add read-only if needed if not volume.disk.read_write: cmd[-1] += ',ro' # add the type if specified if type is not None: cmd += ['-t', type] _util.check_output_(cmd, stderr=subprocess.STDOUT)
Calls the mount command, specifying the mount type and mount options.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/filesystems.py#L83-L102
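For illustration, the mount command such a call would assemble for a hypothetical read-only NTFS volume at a 1 MiB offset (all values invented):

offset, size = 1048576, 104857600  # hypothetical partition geometry
opts = 'loop,offset=' + str(offset) + ',sizelimit=' + str(size)

cmd = ['mount', '/tmp/image.raw', '/mnt/p1', '-o', opts]
cmd[-1] += ',ro'       # read-only image
cmd += ['-t', 'ntfs']  # explicit filesystem type

print(' '.join(cmd))
# mount /tmp/image.raw /mnt/p1 -o loop,offset=1048576,sizelimit=104857600,ro -t ntfs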
ralphje/imagemounter
imagemounter/filesystems.py
Jffs2FileSystemType.mount
def mount(self, volume):
        """Performs the specific operations needed to mount a JFFS2 image. This kind of image is sometimes used for
        things like BIOS images, so external tools are required, but with this method you don't have to memorize
        anything and it works quickly and easily.

        Note that this module might not yet work while mounting multiple images at the same time.
        """

        # we have to make a ram-device to store the image, we keep 20% overhead
        size_in_kb = int((volume.size / 1024) * 1.2)
        _util.check_call_(['modprobe', '-v', 'mtd'])
        _util.check_call_(['modprobe', '-v', 'jffs2'])
        _util.check_call_(['modprobe', '-v', 'mtdram', 'total_size={}'.format(size_in_kb), 'erase_size=256'])
        _util.check_call_(['modprobe', '-v', 'mtdblock'])
        _util.check_call_(['dd', 'if=' + volume.get_raw_path(), 'of=/dev/mtd0'])
        _util.check_call_(['mount', '-t', 'jffs2', '/dev/mtdblock0', volume.mountpoint])
python
def mount(self, volume):
        """Performs the specific operations needed to mount a JFFS2 image. This kind of image is sometimes used for
        things like BIOS images, so external tools are required, but with this method you don't have to memorize
        anything and it works quickly and easily.

        Note that this module might not yet work while mounting multiple images at the same time.
        """

        # we have to make a ram-device to store the image, we keep 20% overhead
        size_in_kb = int((volume.size / 1024) * 1.2)
        _util.check_call_(['modprobe', '-v', 'mtd'])
        _util.check_call_(['modprobe', '-v', 'jffs2'])
        _util.check_call_(['modprobe', '-v', 'mtdram', 'total_size={}'.format(size_in_kb), 'erase_size=256'])
        _util.check_call_(['modprobe', '-v', 'mtdblock'])
        _util.check_call_(['dd', 'if=' + volume.get_raw_path(), 'of=/dev/mtd0'])
        _util.check_call_(['mount', '-t', 'jffs2', '/dev/mtdblock0', volume.mountpoint])
Performs the specific operations needed to mount a JFFS2 image. This kind of image is sometimes used for things like BIOS images, so external tools are required, but with this method you don't have to memorize anything and it works quickly and easily. Note that this module might not yet work while mounting multiple images at the same time.
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/filesystems.py#L279-L293
ralphje/imagemounter
imagemounter/filesystems.py
LuksFileSystemType.mount
def mount(self, volume): """Command that is an alternative to the :func:`mount` command that opens a LUKS container. The opened volume is added to the subvolume set of this volume. Requires the user to enter the key manually. TODO: add support for :attr:`keys` :return: the Volume contained in the LUKS container, or None on failure. :raises NoLoopbackAvailableError: when no free loopback could be found :raises IncorrectFilesystemError: when this is not a LUKS volume :raises SubsystemError: when the underlying command fails """ # Open a loopback device volume._find_loopback() # Check if this is a LUKS device # noinspection PyBroadException try: _util.check_call_(["cryptsetup", "isLuks", volume.loopback], stderr=subprocess.STDOUT) # ret = 0 if isLuks except Exception: logger.warning("Not a LUKS volume") # clean the loopback device, we want this method to be clean as possible # noinspection PyBroadException try: volume._free_loopback() except Exception: pass raise IncorrectFilesystemError() try: extra_args = [] key = None if volume.key: t, v = volume.key.split(':', 1) if t == 'p': # passphrase key = v elif t == 'f': # key-file extra_args = ['--key-file', v] elif t == 'm': # master-key-file extra_args = ['--master-key-file', v] else: logger.warning("No key material provided for %s", volume) except ValueError: logger.exception("Invalid key material provided (%s) for %s. Expecting [arg]:[value]", volume.key, volume) volume._free_loopback() raise ArgumentError() # Open the LUKS container volume._paths['luks'] = 'image_mounter_luks_' + str(random.randint(10000, 99999)) # noinspection PyBroadException try: cmd = ["cryptsetup", "luksOpen", volume.loopback, volume._paths['luks']] cmd.extend(extra_args) if not volume.disk.read_write: cmd.insert(1, '-r') if key is not None: logger.debug('$ {0}'.format(' '.join(cmd))) # for py 3.2+, we could have used input=, but that doesn't exist in py2.7. p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) p.communicate(key.encode("utf-8")) p.wait() retcode = p.poll() if retcode: raise KeyInvalidError() else: _util.check_call_(cmd) except ImageMounterError: del volume._paths['luks'] volume._free_loopback() raise except Exception as e: del volume._paths['luks'] volume._free_loopback() raise SubsystemError(e) size = None # noinspection PyBroadException try: result = _util.check_output_(["cryptsetup", "status", volume._paths['luks']]) for l in result.splitlines(): if "size:" in l and "key" not in l: size = int(l.replace("size:", "").replace("sectors", "").strip()) * volume.disk.block_size except Exception: pass container = volume.volumes._make_single_subvolume(flag='alloc', offset=0, size=size) container.info['fsdescription'] = 'LUKS Volume' return container
python
def mount(self, volume): """Command that is an alternative to the :func:`mount` command that opens a LUKS container. The opened volume is added to the subvolume set of this volume. Requires the user to enter the key manually. TODO: add support for :attr:`keys` :return: the Volume contained in the LUKS container, or None on failure. :raises NoLoopbackAvailableError: when no free loopback could be found :raises IncorrectFilesystemError: when this is not a LUKS volume :raises SubsystemError: when the underlying command fails """ # Open a loopback device volume._find_loopback() # Check if this is a LUKS device # noinspection PyBroadException try: _util.check_call_(["cryptsetup", "isLuks", volume.loopback], stderr=subprocess.STDOUT) # ret = 0 if isLuks except Exception: logger.warning("Not a LUKS volume") # clean the loopback device, we want this method to be clean as possible # noinspection PyBroadException try: volume._free_loopback() except Exception: pass raise IncorrectFilesystemError() try: extra_args = [] key = None if volume.key: t, v = volume.key.split(':', 1) if t == 'p': # passphrase key = v elif t == 'f': # key-file extra_args = ['--key-file', v] elif t == 'm': # master-key-file extra_args = ['--master-key-file', v] else: logger.warning("No key material provided for %s", volume) except ValueError: logger.exception("Invalid key material provided (%s) for %s. Expecting [arg]:[value]", volume.key, volume) volume._free_loopback() raise ArgumentError() # Open the LUKS container volume._paths['luks'] = 'image_mounter_luks_' + str(random.randint(10000, 99999)) # noinspection PyBroadException try: cmd = ["cryptsetup", "luksOpen", volume.loopback, volume._paths['luks']] cmd.extend(extra_args) if not volume.disk.read_write: cmd.insert(1, '-r') if key is not None: logger.debug('$ {0}'.format(' '.join(cmd))) # for py 3.2+, we could have used input=, but that doesn't exist in py2.7. p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) p.communicate(key.encode("utf-8")) p.wait() retcode = p.poll() if retcode: raise KeyInvalidError() else: _util.check_call_(cmd) except ImageMounterError: del volume._paths['luks'] volume._free_loopback() raise except Exception as e: del volume._paths['luks'] volume._free_loopback() raise SubsystemError(e) size = None # noinspection PyBroadException try: result = _util.check_output_(["cryptsetup", "status", volume._paths['luks']]) for l in result.splitlines(): if "size:" in l and "key" not in l: size = int(l.replace("size:", "").replace("sectors", "").strip()) * volume.disk.block_size except Exception: pass container = volume.volumes._make_single_subvolume(flag='alloc', offset=0, size=size) container.info['fsdescription'] = 'LUKS Volume' return container
Command that is an alternative to the :func:`mount` command that opens a LUKS container. The opened volume is added to the subvolume set of this volume. Requires the user to enter the key manually. TODO: add support for :attr:`keys` :return: the Volume contained in the LUKS container, or None on failure. :raises NoLoopbackAvailableError: when no free loopback could be found :raises IncorrectFilesystemError: when this is not a LUKS volume :raises SubsystemError: when the underlying command fails
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/filesystems.py#L307-L398
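A standalone sketch of the t:v key-material convention parsed above (sample values invented):

def parse_key_material(key):
    # mirrors the p:/f:/m: convention from LuksFileSystemType.mount
    t, v = key.split(':', 1)
    if t == 'p':    # passphrase
        return v, []
    elif t == 'f':  # key-file
        return None, ['--key-file', v]
    elif t == 'm':  # master-key-file
        return None, ['--master-key-file', v]
    return None, []

print(parse_key_material('p:hunter2'))         # ('hunter2', [])
print(parse_key_material('f:/keys/luks.key'))  # (None, ['--key-file', '/keys/luks.key'])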
ralphje/imagemounter
imagemounter/filesystems.py
BdeFileSystemType.mount
def mount(self, volume): """Mounts a BDE container. Uses key material provided by the :attr:`keys` attribute. The key material should be provided in the same format as to :cmd:`bdemount`, used as follows: k:full volume encryption and tweak key p:passphrase r:recovery password s:file to startup key (.bek) :return: the Volume contained in the BDE container :raises ArgumentError: if the keys argument is invalid :raises SubsystemError: when the underlying command fails """ volume._paths['bde'] = tempfile.mkdtemp(prefix='image_mounter_bde_') try: if volume.key: t, v = volume.key.split(':', 1) key = ['-' + t, v] else: logger.warning("No key material provided for %s", volume) key = [] except ValueError: logger.exception("Invalid key material provided (%s) for %s. Expecting [arg]:[value]", volume.key, volume) raise ArgumentError() # noinspection PyBroadException try: cmd = ["bdemount", volume.get_raw_path(), volume._paths['bde'], '-o', str(volume.offset)] cmd.extend(key) _util.check_call_(cmd) except Exception as e: del volume._paths['bde'] logger.exception("Failed mounting BDE volume %s.", volume) raise SubsystemError(e) container = volume.volumes._make_single_subvolume(flag='alloc', offset=0, size=volume.size) container.info['fsdescription'] = 'BDE Volume' return container
python
def mount(self, volume): """Mounts a BDE container. Uses key material provided by the :attr:`keys` attribute. The key material should be provided in the same format as to :cmd:`bdemount`, used as follows: k:full volume encryption and tweak key p:passphrase r:recovery password s:file to startup key (.bek) :return: the Volume contained in the BDE container :raises ArgumentError: if the keys argument is invalid :raises SubsystemError: when the underlying command fails """ volume._paths['bde'] = tempfile.mkdtemp(prefix='image_mounter_bde_') try: if volume.key: t, v = volume.key.split(':', 1) key = ['-' + t, v] else: logger.warning("No key material provided for %s", volume) key = [] except ValueError: logger.exception("Invalid key material provided (%s) for %s. Expecting [arg]:[value]", volume.key, volume) raise ArgumentError() # noinspection PyBroadException try: cmd = ["bdemount", volume.get_raw_path(), volume._paths['bde'], '-o', str(volume.offset)] cmd.extend(key) _util.check_call_(cmd) except Exception as e: del volume._paths['bde'] logger.exception("Failed mounting BDE volume %s.", volume) raise SubsystemError(e) container = volume.volumes._make_single_subvolume(flag='alloc', offset=0, size=volume.size) container.info['fsdescription'] = 'BDE Volume' return container
Mounts a BDE container. Uses key material provided by the :attr:`keys` attribute. The key material should be provided in the same format as to :cmd:`bdemount`, used as follows: k:full volume encryption and tweak key p:passphrase r:recovery password s:file to startup key (.bek) :return: the Volume contained in the BDE container :raises ArgumentError: if the keys argument is invalid :raises SubsystemError: when the underlying command fails
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/filesystems.py#L411-L451
ralphje/imagemounter
imagemounter/filesystems.py
LvmFileSystemType.mount
def mount(self, volume):
        """Performs mount actions on an LVM. Scans for active volume groups from the loopback device, activates it
        and fills :attr:`volumes` with the logical volumes.

        :raises NoLoopbackAvailableError: when no loopback was available
        :raises IncorrectFilesystemError: when the volume is not a volume group
        """

        os.environ['LVM_SUPPRESS_FD_WARNINGS'] = '1'

        # find free loopback device
        volume._find_loopback()
        time.sleep(0.2)

        try:
            # Scan for new lvm volumes
            result = _util.check_output_(["lvm", "pvscan"])
            for l in result.splitlines():
                if volume.loopback in l or (volume.offset == 0 and volume.get_raw_path() in l):
                    for vg in re.findall(r'VG (\S+)', l):
                        volume.info['volume_group'] = vg

            if not volume.info.get('volume_group'):
                logger.warning("Volume is not a volume group. (Searching for %s)", volume.loopback)
                raise IncorrectFilesystemError()

            # Enable lvm volumes
            _util.check_call_(["lvm", "vgchange", "-a", "y", volume.info['volume_group']], stdout=subprocess.PIPE)
        except Exception:
            volume._free_loopback()
            raise

        volume.volumes.vstype = 'lvm'
        # fills it up.
        for _ in volume.volumes.detect_volumes('lvm'):
            pass
python
def mount(self, volume):
        """Performs mount actions on an LVM. Scans for active volume groups from the loopback device, activates it
        and fills :attr:`volumes` with the logical volumes.

        :raises NoLoopbackAvailableError: when no loopback was available
        :raises IncorrectFilesystemError: when the volume is not a volume group
        """

        os.environ['LVM_SUPPRESS_FD_WARNINGS'] = '1'

        # find free loopback device
        volume._find_loopback()
        time.sleep(0.2)

        try:
            # Scan for new lvm volumes
            result = _util.check_output_(["lvm", "pvscan"])
            for l in result.splitlines():
                if volume.loopback in l or (volume.offset == 0 and volume.get_raw_path() in l):
                    for vg in re.findall(r'VG (\S+)', l):
                        volume.info['volume_group'] = vg

            if not volume.info.get('volume_group'):
                logger.warning("Volume is not a volume group. (Searching for %s)", volume.loopback)
                raise IncorrectFilesystemError()

            # Enable lvm volumes
            _util.check_call_(["lvm", "vgchange", "-a", "y", volume.info['volume_group']], stdout=subprocess.PIPE)
        except Exception:
            volume._free_loopback()
            raise

        volume.volumes.vstype = 'lvm'
        # fills it up.
        for _ in volume.volumes.detect_volumes('lvm'):
            pass
Performs mount actions on an LVM. Scans for active volume groups from the loopback device, activates it and fills :attr:`volumes` with the logical volumes. :raises NoLoopbackAvailableError: when no loopback was available :raises IncorrectFilesystemError: when the volume is not a volume group
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/filesystems.py#L460-L494
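A standalone sketch of the lvm pvscan output parsing above, run on an invented output line:

import re

# invented pvscan output line
line = '  PV /dev/loop0   VG vg_data   lvm2 [10.00 GiB / 0    free]'
loopback = '/dev/loop0'

volume_group = None
if loopback in line:
    for vg in re.findall(r'VG (\S+)', line):
        volume_group = vg

print(volume_group)  # vg_data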
ralphje/imagemounter
imagemounter/filesystems.py
RaidFileSystemType.mount
def mount(self, volume): """Add the volume to a RAID system. The RAID array is activated as soon as the array can be activated. :raises NoLoopbackAvailableError: if no loopback device was found """ volume._find_loopback() raid_status = None try: # use mdadm to mount the loopback to a md device # incremental and run as soon as available output = _util.check_output_(['mdadm', '-IR', volume.loopback], stderr=subprocess.STDOUT) match = re.findall(r"attached to ([^ ,]+)", output) if match: volume._paths['md'] = os.path.realpath(match[0]) if 'which is already active' in output: logger.info("RAID is already active in other volume, using %s", volume._paths['md']) raid_status = 'active' elif 'not enough to start' in output: volume._paths['md'] = volume._paths['md'].replace("/dev/md/", "/dev/md") logger.info("RAID volume added, but not enough to start %s", volume._paths['md']) raid_status = 'waiting' else: logger.info("RAID started at {0}".format(volume._paths['md'])) raid_status = 'active' except Exception as e: logger.exception("Failed mounting RAID.") volume._free_loopback() raise SubsystemError(e) # search for the RAID volume for v in volume.disk.parser.get_volumes(): if v._paths.get("md") == volume._paths['md'] and v.volumes: logger.debug("Adding existing volume %s to volume %s", v.volumes[0], volume) v.volumes[0].info['raid_status'] = raid_status volume.volumes.volumes.append(v.volumes[0]) return v.volumes[0] else: logger.debug("Creating RAID volume for %s", self) container = volume.volumes._make_single_subvolume(flag='alloc', offset=0, size=volume.size) container.info['fsdescription'] = 'RAID Volume' container.info['raid_status'] = raid_status return container
python
def mount(self, volume): """Add the volume to a RAID system. The RAID array is activated as soon as the array can be activated. :raises NoLoopbackAvailableError: if no loopback device was found """ volume._find_loopback() raid_status = None try: # use mdadm to mount the loopback to a md device # incremental and run as soon as available output = _util.check_output_(['mdadm', '-IR', volume.loopback], stderr=subprocess.STDOUT) match = re.findall(r"attached to ([^ ,]+)", output) if match: volume._paths['md'] = os.path.realpath(match[0]) if 'which is already active' in output: logger.info("RAID is already active in other volume, using %s", volume._paths['md']) raid_status = 'active' elif 'not enough to start' in output: volume._paths['md'] = volume._paths['md'].replace("/dev/md/", "/dev/md") logger.info("RAID volume added, but not enough to start %s", volume._paths['md']) raid_status = 'waiting' else: logger.info("RAID started at {0}".format(volume._paths['md'])) raid_status = 'active' except Exception as e: logger.exception("Failed mounting RAID.") volume._free_loopback() raise SubsystemError(e) # search for the RAID volume for v in volume.disk.parser.get_volumes(): if v._paths.get("md") == volume._paths['md'] and v.volumes: logger.debug("Adding existing volume %s to volume %s", v.volumes[0], volume) v.volumes[0].info['raid_status'] = raid_status volume.volumes.volumes.append(v.volumes[0]) return v.volumes[0] else: logger.debug("Creating RAID volume for %s", self) container = volume.volumes._make_single_subvolume(flag='alloc', offset=0, size=volume.size) container.info['fsdescription'] = 'RAID Volume' container.info['raid_status'] = raid_status return container
Add the volume to a RAID system. The RAID array is activated as soon as the array can be activated. :raises NoLoopbackAvailableError: if no loopback device was found
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/filesystems.py#L508-L552
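A standalone sketch of how the mdadm -IR output is interpreted above; the output string is an invented example:

import re

# invented `mdadm -IR` output
output = 'mdadm: /dev/loop0 attached to /dev/md/0, not enough to start the array.'

match = re.findall(r"attached to ([^ ,]+)", output)
md_path = match[0]  # the sample output is guaranteed to match

if 'not enough to start' in output:
    md_path = md_path.replace("/dev/md/", "/dev/md")
    status = 'waiting'
else:
    status = 'active'

print(md_path, status)  # /dev/md0 waiting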
mbr/latex
latex/__init__.py
escape
def escape(s, fold_newlines=True): """Escapes a string to make it usable in LaTeX text mode. Will replace special characters as well as newlines. Some problematic characters like ``[`` and ``]`` are escaped into groups (e.g. ``{[}``), because they tend to cause problems when mixed with ``\\`` newlines otherwise. :param s: The string to escape. :param fold_newlines: If true, multiple newlines will be reduced to just a single ``\\``. Otherwise, whitespace is kept intact by adding multiple ``[n\baselineskip]``. """ def sub(m): c = m.group() if c in CHAR_ESCAPE: return CHAR_ESCAPE[c] if c.isspace(): if fold_newlines: return r'\\' return r'\\[{}\baselineskip]'.format(len(c)) return ESCAPE_RE.sub(sub, s)
python
def escape(s, fold_newlines=True): """Escapes a string to make it usable in LaTeX text mode. Will replace special characters as well as newlines. Some problematic characters like ``[`` and ``]`` are escaped into groups (e.g. ``{[}``), because they tend to cause problems when mixed with ``\\`` newlines otherwise. :param s: The string to escape. :param fold_newlines: If true, multiple newlines will be reduced to just a single ``\\``. Otherwise, whitespace is kept intact by adding multiple ``[n\baselineskip]``. """ def sub(m): c = m.group() if c in CHAR_ESCAPE: return CHAR_ESCAPE[c] if c.isspace(): if fold_newlines: return r'\\' return r'\\[{}\baselineskip]'.format(len(c)) return ESCAPE_RE.sub(sub, s)
Escapes a string to make it usable in LaTeX text mode. Will replace special characters as well as newlines. Some problematic characters like ``[`` and ``]`` are escaped into groups (e.g. ``{[}``), because they tend to cause problems when mixed with ``\\`` newlines otherwise. :param s: The string to escape. :param fold_newlines: If true, multiple newlines will be reduced to just a single ``\\``. Otherwise, whitespace is kept intact by adding multiple ``[n\baselineskip]``.
https://github.com/mbr/latex/blob/f96cb9125b4f570fc2ffc5ae628e2f4069b2f3cf/latex/__init__.py#L37-L61
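A cut-down standalone illustration of the same regex-substitution approach; the escape table here is a small invented subset, not the module's full CHAR_ESCAPE:

import re

CHAR_ESCAPE = {'&': r'\&', '%': r'\%', '#': r'\#', '[': '{[}', ']': '{]}'}
ESCAPE_RE = re.compile(r'[&%#\[\]]|\n+')

def mini_escape(s):
    def sub(m):
        c = m.group()
        if c in CHAR_ESCAPE:
            return CHAR_ESCAPE[c]
        return r'\\'  # fold any run of newlines into a single \\
    return ESCAPE_RE.sub(sub, s)

print(mini_escape('100% of [data]\n\nnext'))  # 100\% of {[}data{]}\\next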
mbr/latex
latex/build.py
build_pdf
def build_pdf(source, texinputs=[], builder=None): """Builds a LaTeX source to PDF. Will automatically instantiate an available builder (or raise a :class:`exceptions.RuntimeError` if none are available) and build the supplied source with it. Parameters are passed on to the builder's :meth:`~latex.build.LatexBuilder.build_pdf` function. :param builder: Specify which builder should be used - ``latexmk``, ``pdflatex`` or ``xelatexmk``. """ if builder is None: builders = PREFERRED_BUILDERS elif builder not in BUILDERS: raise RuntimeError('Invalid Builder specified') else: builders = (builder, ) for bld in builders: bld_cls = BUILDERS[bld] builder = bld_cls() if not builder.is_available(): continue return builder.build_pdf(source, texinputs) else: raise RuntimeError('No available builder could be instantiated. ' 'Please make sure LaTeX is installed.')
python
def build_pdf(source, texinputs=[], builder=None): """Builds a LaTeX source to PDF. Will automatically instantiate an available builder (or raise a :class:`exceptions.RuntimeError` if none are available) and build the supplied source with it. Parameters are passed on to the builder's :meth:`~latex.build.LatexBuilder.build_pdf` function. :param builder: Specify which builder should be used - ``latexmk``, ``pdflatex`` or ``xelatexmk``. """ if builder is None: builders = PREFERRED_BUILDERS elif builder not in BUILDERS: raise RuntimeError('Invalid Builder specified') else: builders = (builder, ) for bld in builders: bld_cls = BUILDERS[bld] builder = bld_cls() if not builder.is_available(): continue return builder.build_pdf(source, texinputs) else: raise RuntimeError('No available builder could be instantiated. ' 'Please make sure LaTeX is installed.')
Builds a LaTeX source to PDF. Will automatically instantiate an available builder (or raise a :class:`exceptions.RuntimeError` if none are available) and build the supplied source with it. Parameters are passed on to the builder's :meth:`~latex.build.LatexBuilder.build_pdf` function. :param builder: Specify which builder should be used - ``latexmk``, ``pdflatex`` or ``xelatexmk``.
https://github.com/mbr/latex/blob/f96cb9125b4f570fc2ffc5ae628e2f4069b2f3cf/latex/build.py#L207-L235
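A typical usage sketch; the save_to call reflects the package's documented return object and should be treated as an assumption if your version differs:

from latex import build_pdf

min_latex = (r"\documentclass{article}"
             r"\begin{document}"
             r"Hello, world!"
             r"\end{document}")

# builds with the first available builder (latexmk is tried first)
pdf = build_pdf(min_latex)
pdf.save_to('hello.pdf')  # assumed: the returned data object supports save_to()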
mbr/latex
latex/errors.py
parse_log
def parse_log(log, context_size=3):
    """Parses latex log output and tries to extract error messages.

    Requires ``-file-line-error`` to be active.

    :param log: The contents of the logfile as a string.
    :param context_size: Number of lines to keep as context, including the
                         original error line.
    :return: A list of dictionaries, each containing ``line`` (line number, an
             int), ``error`` (the error message), ``filename`` (name of the
             temporary file used for building) and ``context`` (list of lines,
             starting with the error line).
    """
    lines = log.splitlines()
    errors = []

    for n, line in enumerate(lines):
        m = LATEX_ERR_RE.match(line)
        if m:
            err = m.groupdict().copy()
            err['context'] = lines[n:n + context_size]
            try:
                err['line'] = int(err['line'])
            except TypeError:
                pass  # ignore invalid int conversion
            errors.append(err)

    return errors
python
def parse_log(log, context_size=3):
    """Parses latex log output and tries to extract error messages.

    Requires ``-file-line-error`` to be active.

    :param log: The contents of the logfile as a string.
    :param context_size: Number of lines to keep as context, including the
                         original error line.
    :return: A list of dictionaries, each containing ``line`` (line number, an
             int), ``error`` (the error message), ``filename`` (name of the
             temporary file used for building) and ``context`` (list of lines,
             starting with the error line).
    """
    lines = log.splitlines()
    errors = []

    for n, line in enumerate(lines):
        m = LATEX_ERR_RE.match(line)
        if m:
            err = m.groupdict().copy()
            err['context'] = lines[n:n + context_size]
            try:
                err['line'] = int(err['line'])
            except TypeError:
                pass  # ignore invalid int conversion
            errors.append(err)

    return errors
Parses latex log output and tries to extract error messages. Requires ``-file-line-error`` to be active. :param log: The contents of the logfile as a string. :param context_size: Number of lines to keep as context, including the original error line. :return: A list of dictionaries, each containing ``line`` (line number, an int), ``error`` (the error message), ``filename`` (name of the temporary file used for building) and ``context`` (list of lines, starting with the error line).
https://github.com/mbr/latex/blob/f96cb9125b4f570fc2ffc5ae628e2f4069b2f3cf/latex/errors.py#L7-L34
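A usage sketch on a fabricated -file-line-error style log fragment; it assumes LATEX_ERR_RE matches the conventional "file:line: message" form:

from latex.errors import parse_log

log = ("This is pdfTeX, Version 3.14159265\n"
       "./doc.tex:13: Undefined control sequence.\n"
       "l.13 \\badmacro\n"
       "Some context line here.\n")

for err in parse_log(log):
    # keys per the docstring; the exact regex behaviour is an assumption
    print(err['filename'], err['line'], err['error'])
# expected: ./doc.tex 13 Undefined control sequence.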
mbr/latex
latex/jinja2.py
make_env
def make_env(*args, **kwargs): """Creates an :py:class:`~jinja2.Environment` with different defaults. Per default, ``autoescape`` will be disabled and ``trim_blocks`` enabled. All start/end/prefix strings will be changed for a more LaTeX-friendly version (see the docs for details). Any arguments will be passed on to the :py:class:`~jinja2.Environment` constructor and override new values. Finally, the ``|e``, ``|escape`` and ``|forceescape`` filters will be replaced with a call to :func:`latex.escape`.""" ka = ENV_ARGS.copy() ka.update(kwargs) env = Environment(*args, **ka) env.filters['e'] = LatexMarkup.escape env.filters['escape'] = LatexMarkup.escape env.filters['forceescape'] = LatexMarkup.escape # FIXME: this is a bug return env
python
def make_env(*args, **kwargs): """Creates an :py:class:`~jinja2.Environment` with different defaults. Per default, ``autoescape`` will be disabled and ``trim_blocks`` enabled. All start/end/prefix strings will be changed for a more LaTeX-friendly version (see the docs for details). Any arguments will be passed on to the :py:class:`~jinja2.Environment` constructor and override new values. Finally, the ``|e``, ``|escape`` and ``|forceescape`` filters will be replaced with a call to :func:`latex.escape`.""" ka = ENV_ARGS.copy() ka.update(kwargs) env = Environment(*args, **ka) env.filters['e'] = LatexMarkup.escape env.filters['escape'] = LatexMarkup.escape env.filters['forceescape'] = LatexMarkup.escape # FIXME: this is a bug return env
Creates an :py:class:`~jinja2.Environment` with different defaults. Per default, ``autoescape`` will be disabled and ``trim_blocks`` enabled. All start/end/prefix strings will be changed for a more LaTeX-friendly version (see the docs for details). Any arguments will be passed on to the :py:class:`~jinja2.Environment` constructor and override new values. Finally, the ``|e``, ``|escape`` and ``|forceescape`` filters will be replaced with a call to :func:`latex.escape`.
https://github.com/mbr/latex/blob/f96cb9125b4f570fc2ffc5ae628e2f4069b2f3cf/latex/jinja2.py#L41-L60
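A usage sketch of the resulting environment, assuming the package's documented LaTeX-friendly delimiters (e.g. \VAR{...} for variables):

from latex.jinja2 import make_env
from jinja2 import DictLoader

env = make_env(loader=DictLoader({
    'doc.tex': r'Hello \VAR{name|e}!',  # assumed \VAR{...} variable delimiter
}))
tpl = env.get_template('doc.tex')
print(tpl.render(name='100% done'))  # Hello 100\% done!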
e-dard/flask-s3
flask_s3.py
split_metadata_params
def split_metadata_params(headers):
    """
    Given a dict of headers for s3, separates those that are boto3 parameters
    and those that must be metadata
    """
    params = {}
    metadata = {}
    for header_name in headers:
        if header_name.lower() in header_mapping:
            params[header_mapping[header_name.lower()]] = headers[header_name]
        else:
            metadata[header_name] = headers[header_name]
    return metadata, params
python
def split_metadata_params(headers):
    """
    Given a dict of headers for s3, separates those that are boto3 parameters
    and those that must be metadata
    """
    params = {}
    metadata = {}
    for header_name in headers:
        if header_name.lower() in header_mapping:
            params[header_mapping[header_name.lower()]] = headers[header_name]
        else:
            metadata[header_name] = headers[header_name]
    return metadata, params
Given a dict of headers for s3, separates those that are boto3 parameters and those that must be metadata
https://github.com/e-dard/flask-s3/blob/b8c72b40eb38a05135eec36a90f1ee0c96248f72/flask_s3.py#L63-L76
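A standalone sketch of the split, with a tiny invented header_mapping (the real module maps header names to boto3 put_object parameter names):

header_mapping = {'content-type': 'ContentType',
                  'cache-control': 'CacheControl'}  # invented subset

def split(headers):
    # headers with a known mapping become boto3 params; the rest is metadata
    params, metadata = {}, {}
    for name, value in headers.items():
        if name.lower() in header_mapping:
            params[header_mapping[name.lower()]] = value
        else:
            metadata[name] = value
    return metadata, params

print(split({'Content-Type': 'text/css', 'x-robots-tag': 'noindex'}))
# ({'x-robots-tag': 'noindex'}, {'ContentType': 'text/css'})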
e-dard/flask-s3
flask_s3.py
hash_file
def hash_file(filename): """ Generate a hash for the contents of a file """ hasher = hashlib.sha1() with open(filename, 'rb') as f: buf = f.read(65536) while len(buf) > 0: hasher.update(buf) buf = f.read(65536) return hasher.hexdigest()
python
def hash_file(filename): """ Generate a hash for the contents of a file """ hasher = hashlib.sha1() with open(filename, 'rb') as f: buf = f.read(65536) while len(buf) > 0: hasher.update(buf) buf = f.read(65536) return hasher.hexdigest()
Generate a hash for the contents of a file
https://github.com/e-dard/flask-s3/blob/b8c72b40eb38a05135eec36a90f1ee0c96248f72/flask_s3.py#L86-L97
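A quick usage sketch of hash_file; it writes a throwaway temporary file so the snippet is self-contained:

import tempfile

with tempfile.NamedTemporaryFile(delete=False, suffix='.css') as f:
    f.write(b'body { color: red; }')
    path = f.name

print(hash_file(path))  # 40-character SHA-1 hex digest of the file contents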
e-dard/flask-s3
flask_s3.py
_get_bucket_name
def _get_bucket_name(**values): """ Generates the bucket name for url_for. """ app = current_app # manage other special values, all have no meaning for static urls values.pop('_external', False) # external has no meaning here values.pop('_anchor', None) # anchor as well values.pop('_method', None) # method too url_style = get_setting('FLASKS3_URL_STYLE', app) if url_style == 'host': url_format = '{bucket_name}.{bucket_domain}' elif url_style == 'path': url_format = '{bucket_domain}/{bucket_name}' else: raise ValueError('Invalid S3 URL style: "{}"'.format(url_style)) if get_setting('FLASKS3_CDN_DOMAIN', app): bucket_path = '{}'.format(get_setting('FLASKS3_CDN_DOMAIN', app)) else: bucket_path = url_format.format( bucket_name=get_setting('FLASKS3_BUCKET_NAME', app), bucket_domain=get_setting('FLASKS3_BUCKET_DOMAIN', app), ) bucket_path += _get_statics_prefix(app).rstrip('/') return bucket_path, values
python
def _get_bucket_name(**values): """ Generates the bucket name for url_for. """ app = current_app # manage other special values, all have no meaning for static urls values.pop('_external', False) # external has no meaning here values.pop('_anchor', None) # anchor as well values.pop('_method', None) # method too url_style = get_setting('FLASKS3_URL_STYLE', app) if url_style == 'host': url_format = '{bucket_name}.{bucket_domain}' elif url_style == 'path': url_format = '{bucket_domain}/{bucket_name}' else: raise ValueError('Invalid S3 URL style: "{}"'.format(url_style)) if get_setting('FLASKS3_CDN_DOMAIN', app): bucket_path = '{}'.format(get_setting('FLASKS3_CDN_DOMAIN', app)) else: bucket_path = url_format.format( bucket_name=get_setting('FLASKS3_BUCKET_NAME', app), bucket_domain=get_setting('FLASKS3_BUCKET_DOMAIN', app), ) bucket_path += _get_statics_prefix(app).rstrip('/') return bucket_path, values
Generates the bucket name for url_for.
https://github.com/e-dard/flask-s3/blob/b8c72b40eb38a05135eec36a90f1ee0c96248f72/flask_s3.py#L100-L129
e-dard/flask-s3
flask_s3.py
url_for
def url_for(endpoint, **values): """ Generates a URL to the given endpoint. If the endpoint is for a static resource then an Amazon S3 URL is generated, otherwise the call is passed on to `flask.url_for`. Because this function is set as a jinja environment variable when `FlaskS3.init_app` is invoked, this function replaces `flask.url_for` in templates automatically. It is unlikely that this function will need to be directly called from within your application code, unless you need to refer to static assets outside of your templates. """ app = current_app if app.config.get('TESTING', False) and not app.config.get('FLASKS3_OVERRIDE_TESTING', True): return flask_url_for(endpoint, **values) if 'FLASKS3_BUCKET_NAME' not in app.config: raise ValueError("FLASKS3_BUCKET_NAME not found in app configuration.") if endpoint == 'static' or endpoint.endswith('.static'): scheme = 'https' if not app.config.get("FLASKS3_USE_HTTPS", True): scheme = 'http' # allow per url override for scheme scheme = values.pop('_scheme', scheme) bucket_path, values = _get_bucket_name(**values) urls = app.url_map.bind(bucket_path, url_scheme=scheme) built = urls.build(endpoint, values=values, force_external=True) return built return flask_url_for(endpoint, **values)
python
def url_for(endpoint, **values): """ Generates a URL to the given endpoint. If the endpoint is for a static resource then an Amazon S3 URL is generated, otherwise the call is passed on to `flask.url_for`. Because this function is set as a jinja environment variable when `FlaskS3.init_app` is invoked, this function replaces `flask.url_for` in templates automatically. It is unlikely that this function will need to be directly called from within your application code, unless you need to refer to static assets outside of your templates. """ app = current_app if app.config.get('TESTING', False) and not app.config.get('FLASKS3_OVERRIDE_TESTING', True): return flask_url_for(endpoint, **values) if 'FLASKS3_BUCKET_NAME' not in app.config: raise ValueError("FLASKS3_BUCKET_NAME not found in app configuration.") if endpoint == 'static' or endpoint.endswith('.static'): scheme = 'https' if not app.config.get("FLASKS3_USE_HTTPS", True): scheme = 'http' # allow per url override for scheme scheme = values.pop('_scheme', scheme) bucket_path, values = _get_bucket_name(**values) urls = app.url_map.bind(bucket_path, url_scheme=scheme) built = urls.build(endpoint, values=values, force_external=True) return built return flask_url_for(endpoint, **values)
Generates a URL to the given endpoint. If the endpoint is for a static resource then an Amazon S3 URL is generated, otherwise the call is passed on to `flask.url_for`. Because this function is set as a jinja environment variable when `FlaskS3.init_app` is invoked, this function replaces `flask.url_for` in templates automatically. It is unlikely that this function will need to be directly called from within your application code, unless you need to refer to static assets outside of your templates.
https://github.com/e-dard/flask-s3/blob/b8c72b40eb38a05135eec36a90f1ee0c96248f72/flask_s3.py#L132-L165
e-dard/flask-s3
flask_s3.py
_bp_static_url
def _bp_static_url(blueprint): """ builds the absolute url path for a blueprint's static folder """ u = six.u('%s%s' % (blueprint.url_prefix or '', blueprint.static_url_path or '')) return u
python
def _bp_static_url(blueprint): """ builds the absolute url path for a blueprint's static folder """ u = six.u('%s%s' % (blueprint.url_prefix or '', blueprint.static_url_path or '')) return u
builds the absolute url path for a blueprint's static folder
https://github.com/e-dard/flask-s3/blob/b8c72b40eb38a05135eec36a90f1ee0c96248f72/flask_s3.py#L168-L171
e-dard/flask-s3
flask_s3.py
_gather_files
def _gather_files(app, hidden, filepath_filter_regex=None): """ Gets all files in static folders and returns in dict.""" dirs = [(six.text_type(app.static_folder), app.static_url_path)] if hasattr(app, 'blueprints'): blueprints = app.blueprints.values() bp_details = lambda x: (x.static_folder, _bp_static_url(x)) dirs.extend([bp_details(x) for x in blueprints if x.static_folder]) valid_files = defaultdict(list) for static_folder, static_url_loc in dirs: if not os.path.isdir(static_folder): logger.warning("WARNING - [%s does not exist]" % static_folder) else: logger.debug("Checking static folder: %s" % static_folder) for root, _, files in os.walk(static_folder): relative_folder = re.sub(r'^/', '', root.replace(static_folder, '')) files = [os.path.join(root, x) for x in files if ( (hidden or x[0] != '.') and # Skip this file if the filter regex is # defined, and this file's path is a # negative match. (filepath_filter_regex == None or re.search( filepath_filter_regex, os.path.join(relative_folder, x))))] if files: valid_files[(static_folder, static_url_loc)].extend(files) return valid_files
python
def _gather_files(app, hidden, filepath_filter_regex=None): """ Gets all files in static folders and returns in dict.""" dirs = [(six.text_type(app.static_folder), app.static_url_path)] if hasattr(app, 'blueprints'): blueprints = app.blueprints.values() bp_details = lambda x: (x.static_folder, _bp_static_url(x)) dirs.extend([bp_details(x) for x in blueprints if x.static_folder]) valid_files = defaultdict(list) for static_folder, static_url_loc in dirs: if not os.path.isdir(static_folder): logger.warning("WARNING - [%s does not exist]" % static_folder) else: logger.debug("Checking static folder: %s" % static_folder) for root, _, files in os.walk(static_folder): relative_folder = re.sub(r'^/', '', root.replace(static_folder, '')) files = [os.path.join(root, x) for x in files if ( (hidden or x[0] != '.') and # Skip this file if the filter regex is # defined, and this file's path is a # negative match. (filepath_filter_regex == None or re.search( filepath_filter_regex, os.path.join(relative_folder, x))))] if files: valid_files[(static_folder, static_url_loc)].extend(files) return valid_files
Gets all files in static folders and returns in dict.
https://github.com/e-dard/flask-s3/blob/b8c72b40eb38a05135eec36a90f1ee0c96248f72/flask_s3.py#L174-L204
e-dard/flask-s3
flask_s3.py
_static_folder_path
def _static_folder_path(static_url, static_folder, static_asset): """ Returns a path to a file based on the static folder, and not on the filesystem holding the file. Returns a path relative to static_url for static_asset """ # first get the asset path relative to the static folder. # static_asset is not simply a filename because it could be # sub-directory then file etc. if not static_asset.startswith(static_folder): raise ValueError("%s static asset must be under %s static folder" % (static_asset, static_folder)) rel_asset = static_asset[len(static_folder):] # Now bolt the static url path and the relative asset location together return '%s/%s' % (static_url.rstrip('/'), rel_asset.lstrip('/'))
python
def _static_folder_path(static_url, static_folder, static_asset): """ Returns a path to a file based on the static folder, and not on the filesystem holding the file. Returns a path relative to static_url for static_asset """ # first get the asset path relative to the static folder. # static_asset is not simply a filename because it could be # sub-directory then file etc. if not static_asset.startswith(static_folder): raise ValueError("%s static asset must be under %s static folder" % (static_asset, static_folder)) rel_asset = static_asset[len(static_folder):] # Now bolt the static url path and the relative asset location together return '%s/%s' % (static_url.rstrip('/'), rel_asset.lstrip('/'))
Returns a path to a file based on the static folder, and not on the filesystem holding the file. Returns a path relative to static_url for static_asset
https://github.com/e-dard/flask-s3/blob/b8c72b40eb38a05135eec36a90f1ee0c96248f72/flask_s3.py#L212-L227
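For illustration, what the path joining above produces for a hypothetical asset:

print(_static_folder_path('/static',
                          '/srv/app/static',
                          '/srv/app/static/css/main.css'))
# /static/css/main.css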
e-dard/flask-s3
flask_s3.py
_write_files
def _write_files(s3, app, static_url_loc, static_folder, files, bucket, ex_keys=None, hashes=None): """ Writes all the files inside a static folder to S3. """ should_gzip = app.config.get('FLASKS3_GZIP') add_mime = app.config.get('FLASKS3_FORCE_MIMETYPE') gzip_include_only = app.config.get('FLASKS3_GZIP_ONLY_EXTS') new_hashes = [] static_folder_rel = _path_to_relative_url(static_folder) for file_path in files: per_file_should_gzip = should_gzip asset_loc = _path_to_relative_url(file_path) full_key_name = _static_folder_path(static_url_loc, static_folder_rel, asset_loc) key_name = full_key_name.lstrip("/") logger.debug("Uploading {} to {} as {}".format(file_path, bucket, key_name)) exclude = False if app.config.get('FLASKS3_ONLY_MODIFIED', False): file_hash = hash_file(file_path) new_hashes.append((full_key_name, file_hash)) if hashes and hashes.get(full_key_name, None) == file_hash: exclude = True if ex_keys and full_key_name in ex_keys or exclude: logger.debug("%s excluded from upload" % key_name) else: h = {} # Set more custom headers if the filepath matches certain # configured regular expressions. filepath_headers = app.config.get('FLASKS3_FILEPATH_HEADERS') if filepath_headers: for filepath_regex, headers in six.iteritems(filepath_headers): if re.search(filepath_regex, file_path): for header, value in six.iteritems(headers): h[header] = value # check for extension, only if there are extensions provided if per_file_should_gzip and gzip_include_only: if os.path.splitext(file_path)[1] not in gzip_include_only: per_file_should_gzip = False if per_file_should_gzip: h["content-encoding"] = "gzip" if (add_mime or per_file_should_gzip) and "content-type" not in h: # When we use GZIP we have to explicitly set the content type # or if the mime flag is True (mimetype, encoding) = mimetypes.guess_type(file_path, False) if mimetype: h["content-type"] = mimetype else: logger.warn("Unable to detect mimetype for %s" % file_path) file_mode = 'rb' if six.PY3 else 'r' with open(file_path, file_mode) as fp: merged_dicts = merge_two_dicts(get_setting('FLASKS3_HEADERS', app), h) metadata, params = split_metadata_params(merged_dicts) if per_file_should_gzip: compressed = six.BytesIO() z = gzip.GzipFile(os.path.basename(file_path), 'wb', 9, compressed) z.write(fp.read()) z.close() data = compressed.getvalue() else: data = fp.read() s3.put_object(Bucket=bucket, Key=key_name, Body=data, ACL="public-read", Metadata=metadata, **params) return new_hashes
python
def _write_files(s3, app, static_url_loc, static_folder, files, bucket, ex_keys=None, hashes=None): """ Writes all the files inside a static folder to S3. """ should_gzip = app.config.get('FLASKS3_GZIP') add_mime = app.config.get('FLASKS3_FORCE_MIMETYPE') gzip_include_only = app.config.get('FLASKS3_GZIP_ONLY_EXTS') new_hashes = [] static_folder_rel = _path_to_relative_url(static_folder) for file_path in files: per_file_should_gzip = should_gzip asset_loc = _path_to_relative_url(file_path) full_key_name = _static_folder_path(static_url_loc, static_folder_rel, asset_loc) key_name = full_key_name.lstrip("/") logger.debug("Uploading {} to {} as {}".format(file_path, bucket, key_name)) exclude = False if app.config.get('FLASKS3_ONLY_MODIFIED', False): file_hash = hash_file(file_path) new_hashes.append((full_key_name, file_hash)) if hashes and hashes.get(full_key_name, None) == file_hash: exclude = True if ex_keys and full_key_name in ex_keys or exclude: logger.debug("%s excluded from upload" % key_name) else: h = {} # Set more custom headers if the filepath matches certain # configured regular expressions. filepath_headers = app.config.get('FLASKS3_FILEPATH_HEADERS') if filepath_headers: for filepath_regex, headers in six.iteritems(filepath_headers): if re.search(filepath_regex, file_path): for header, value in six.iteritems(headers): h[header] = value # check for extension, only if there are extensions provided if per_file_should_gzip and gzip_include_only: if os.path.splitext(file_path)[1] not in gzip_include_only: per_file_should_gzip = False if per_file_should_gzip: h["content-encoding"] = "gzip" if (add_mime or per_file_should_gzip) and "content-type" not in h: # When we use GZIP we have to explicitly set the content type # or if the mime flag is True (mimetype, encoding) = mimetypes.guess_type(file_path, False) if mimetype: h["content-type"] = mimetype else: logger.warn("Unable to detect mimetype for %s" % file_path) file_mode = 'rb' if six.PY3 else 'r' with open(file_path, file_mode) as fp: merged_dicts = merge_two_dicts(get_setting('FLASKS3_HEADERS', app), h) metadata, params = split_metadata_params(merged_dicts) if per_file_should_gzip: compressed = six.BytesIO() z = gzip.GzipFile(os.path.basename(file_path), 'wb', 9, compressed) z.write(fp.read()) z.close() data = compressed.getvalue() else: data = fp.read() s3.put_object(Bucket=bucket, Key=key_name, Body=data, ACL="public-read", Metadata=metadata, **params) return new_hashes
Writes all the files inside a static folder to S3.
https://github.com/e-dard/flask-s3/blob/b8c72b40eb38a05135eec36a90f1ee0c96248f72/flask_s3.py#L230-L308
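The in-memory gzip step above is worth isolating. A minimal sketch using only the standard library; the helper name, file name, and sample data are illustrative, not from the source:

import gzip
import io

def gzip_bytes(data, filename="asset.css"):
    # Compress `data` into an in-memory buffer, mirroring how _write_files
    # compresses each file before handing the bytes to s3.put_object().
    compressed = io.BytesIO()
    with gzip.GzipFile(filename, 'wb', 9, compressed) as z:
        z.write(data)
    return compressed.getvalue()

print(len(gzip_bytes(b"body { color: red; }" * 100)))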
e-dard/flask-s3
flask_s3.py
get_setting
python
def get_setting(name, app=None):
    """
    Returns the value for `name` settings (looks into `app` config, and into
    DEFAULT_SETTINGS). Returns None if not set.

    :param name: (str) name of a setting (e.g. FLASKS3_URL_STYLE)
    :param app: Flask app instance
    :return: setting value or None
    """
    default_value = DEFAULT_SETTINGS.get(name, None)
    return app.config.get(name, default_value) if app else default_value
Returns the value for `name` settings (looks into `app` config, and into DEFAULT_SETTINGS). Returns None if not set. :param name: (str) name of a setting (e.g. FLASKS3_URL_STYLE) :param app: Flask app instance :return: setting value or None
https://github.com/e-dard/flask-s3/blob/b8c72b40eb38a05135eec36a90f1ee0c96248f72/flask_s3.py#L321-L333
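A short usage sketch of the lookup order this implements; the config key and value are illustrative:

from flask import Flask
from flask_s3 import get_setting

app = Flask(__name__)
app.config['FLASKS3_URL_STYLE'] = 'path'

print(get_setting('FLASKS3_URL_STYLE', app))  # app config wins: 'path'
print(get_setting('FLASKS3_URL_STYLE'))       # no app: falls back to DEFAULT_SETTINGS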
e-dard/flask-s3
flask_s3.py
create_all
python
def create_all(app, user=None, password=None, bucket_name=None,
               location=None, include_hidden=False,
               filepath_filter_regex=None, put_bucket_acl=True):
    """
    Uploads all of the static assets associated with a Flask application
    to Amazon S3.

    All static assets are identified on the local filesystem, including
    any static assets associated with *registered* blueprints. In turn,
    each asset is uploaded to the bucket described by `bucket_name`. If
    the bucket does not exist then it is created.

    Flask-S3 creates the same relative static asset folder structure on
    S3 as can be found within your Flask application.

    Many of the optional arguments to `create_all` can be specified
    instead in your application's configuration using the Flask-S3
    `configuration`_ variables.

    :param app: a :class:`flask.Flask` application object.

    :param user: an AWS Access Key ID. You can find this key in the
                 Security Credentials section of your AWS account.
    :type user: `basestring` or None

    :param password: an AWS Secret Access Key. You can find this key in
                     the Security Credentials section of your AWS
                     account.
    :type password: `basestring` or None

    :param bucket_name: the name of the bucket you wish to serve your
                        static assets from. **Note**: while a valid
                        character, it is recommended that you do not
                        include periods in bucket_name if you wish to
                        serve over HTTPS. See Amazon's `bucket
                        restrictions`_ for more details.
    :type bucket_name: `basestring` or None

    :param location: the AWS region to host the bucket in; an empty
                     string indicates the default region should be used,
                     which is the US Standard region. Possible location
                     values include: `'DEFAULT'`, `'EU'`, `'us-east-1'`,
                     `'us-west-1'`, `'us-west-2'`, `'ap-south-1'`,
                     `'ap-northeast-2'`, `'ap-southeast-1'`,
                     `'ap-southeast-2'`, `'ap-northeast-1'`,
                     `'eu-central-1'`, `'eu-west-1'`, `'sa-east-1'`
    :type location: `basestring` or None

    :param include_hidden: by default Flask-S3 will not upload hidden
        files. Set this to true to force the upload of hidden files.
    :type include_hidden: `bool`

    :param filepath_filter_regex: if specified, then the upload of
        static assets is limited to only those files whose relative path
        matches this regular expression string. For example, to only
        upload files within the 'css' directory of your app's static
        store, set to r'^css'.
    :type filepath_filter_regex: `basestring` or None

    :param put_bucket_acl: by default Flask-S3 will set the bucket ACL
        to public. Set this to false to leave the policy unchanged.
    :type put_bucket_acl: `bool`

    .. _bucket restrictions: http://docs.amazonwebservices.com/AmazonS3\
    /latest/dev/BucketRestrictions.html

    """
    user = user or app.config.get('AWS_ACCESS_KEY_ID')
    password = password or app.config.get('AWS_SECRET_ACCESS_KEY')
    bucket_name = bucket_name or app.config.get('FLASKS3_BUCKET_NAME')
    if not bucket_name:
        raise ValueError("No bucket name provided.")
    location = location or app.config.get('FLASKS3_REGION')
    endpoint_url = app.config.get('FLASKS3_ENDPOINT_URL')
    # build list of static files
    all_files = _gather_files(app, include_hidden,
                              filepath_filter_regex=filepath_filter_regex)
    logger.debug("All valid files: %s" % all_files)

    # connect to s3
    s3 = boto3.client("s3",
                      endpoint_url=endpoint_url,
                      region_name=location or None,
                      aws_access_key_id=user,
                      aws_secret_access_key=password)

    # get_or_create bucket
    try:
        s3.head_bucket(Bucket=bucket_name)
    except ClientError as e:
        if int(e.response['Error']['Code']) == 404:
            # Create the bucket
            bucket = s3.create_bucket(Bucket=bucket_name)
        else:
            raise

    if put_bucket_acl:
        s3.put_bucket_acl(Bucket=bucket_name, ACL='public-read')

    if get_setting('FLASKS3_ONLY_MODIFIED', app):
        try:
            hashes_object = s3.get_object(Bucket=bucket_name,
                                          Key='.file-hashes')
            hashes = json.loads(str(hashes_object['Body'].read().decode()))
        except ClientError as e:
            logger.warn("No file hashes found: %s" % e)
            hashes = None

        new_hashes = _upload_files(s3, app, all_files, bucket_name,
                                   hashes=hashes)

        try:
            s3.put_object(Bucket=bucket_name,
                          Key='.file-hashes',
                          Body=json.dumps(dict(new_hashes)),
                          ACL='private')
        except boto3.exceptions.S3UploadFailedError as e:
            logger.warn("Unable to upload file hashes: %s" % e)
    else:
        _upload_files(s3, app, all_files, bucket_name)
Uploads all of the static assets associated with a Flask application to Amazon S3. All static assets are identified on the local filesystem, including any static assets associated with *registered* blueprints. In turn, each asset is uploaded to the bucket described by `bucket_name`. If the bucket does not exist then it is created. Flask-S3 creates the same relative static asset folder structure on S3 as can be found within your Flask application. Many of the optional arguments to `create_all` can be specified instead in your application's configuration using the Flask-S3 `configuration`_ variables. :param app: a :class:`flask.Flask` application object. :param user: an AWS Access Key ID. You can find this key in the Security Credentials section of your AWS account. :type user: `basestring` or None :param password: an AWS Secret Access Key. You can find this key in the Security Credentials section of your AWS account. :type password: `basestring` or None :param bucket_name: the name of the bucket you wish to serve your static assets from. **Note**: while a valid character, it is recommended that you do not include periods in bucket_name if you wish to serve over HTTPS. See Amazon's `bucket restrictions`_ for more details. :type bucket_name: `basestring` or None :param location: the AWS region to host the bucket in; an empty string indicates the default region should be used, which is the US Standard region. Possible location values include: `'DEFAULT'`, `'EU'`, `'us-east-1'`, `'us-west-1'`, `'us-west-2'`, `'ap-south-1'`, `'ap-northeast-2'`, `'ap-southeast-1'`, `'ap-southeast-2'`, `'ap-northeast-1'`, `'eu-central-1'`, `'eu-west-1'`, `'sa-east-1'` :type location: `basestring` or None :param include_hidden: by default Flask-S3 will not upload hidden files. Set this to true to force the upload of hidden files. :type include_hidden: `bool` :param filepath_filter_regex: if specified, then the upload of static assets is limited to only those files whose relative path matches this regular expression string. For example, to only upload files within the 'css' directory of your app's static store, set to r'^css'. :type filepath_filter_regex: `basestring` or None :param put_bucket_acl: by default Flask-S3 will set the bucket ACL to public. Set this to false to leave the policy unchanged. :type put_bucket_acl: `bool` .. _bucket restrictions: http://docs.amazonwebservices.com/AmazonS3\ /latest/dev/BucketRestrictions.html
https://github.com/e-dard/flask-s3/blob/b8c72b40eb38a05135eec36a90f1ee0c96248f72/flask_s3.py#L336-L454
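The typical invocation, per the docstring above, is a one-off upload script; the bucket name below is a placeholder:

from flask import Flask
import flask_s3

app = Flask(__name__)
app.config['FLASKS3_BUCKET_NAME'] = 'my-static-assets'  # placeholder bucket

if __name__ == '__main__':
    flask_s3.create_all(app)  # gathers static files and uploads them to S3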
e-dard/flask-s3
flask_s3.py
FlaskS3.init_app
python
def init_app(self, app):
        """
        An alternative way to pass your :class:`flask.Flask` application
        object to Flask-S3. :meth:`init_app` also takes care of some
        default `settings`_.

        :param app: the :class:`flask.Flask` application object.
        """
        for k, v in DEFAULT_SETTINGS.items():
            app.config.setdefault(k, v)

        if app.debug and not get_setting('FLASKS3_DEBUG', app):
            app.config['FLASKS3_ACTIVE'] = False

        if get_setting('FLASKS3_ACTIVE', app):
            app.jinja_env.globals['url_for'] = url_for
        if get_setting('FLASKS3_USE_CACHE_CONTROL', app) and app.config.get('FLASKS3_CACHE_CONTROL'):
            cache_control_header = get_setting('FLASKS3_CACHE_CONTROL', app)
            app.config['FLASKS3_HEADERS']['Cache-Control'] = cache_control_header
An alternative way to pass your :class:`flask.Flask` application object to Flask-S3. :meth:`init_app` also takes care of some default `settings`_. :param app: the :class:`flask.Flask` application object.
https://github.com/e-dard/flask-s3/blob/b8c72b40eb38a05135eec36a90f1ee0c96248f72/flask_s3.py#L472-L491
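A sketch of the deferred-initialization pattern init_app supports; the bucket name is a placeholder:

from flask import Flask
from flask_s3 import FlaskS3

s3 = FlaskS3()

app = Flask(__name__)
app.config['FLASKS3_BUCKET_NAME'] = 'my-static-assets'  # placeholder
s3.init_app(app)  # fills in DEFAULT_SETTINGS and patches url_for when active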
taleinat/fuzzysearch
src/fuzzysearch/no_deletions.py
find_near_matches_no_deletions_ngrams
python
def find_near_matches_no_deletions_ngrams(subsequence, sequence, search_params):
    """search for near-matches of subsequence in sequence

    This searches for near-matches, where the nearly-matching parts of the
    sequence must meet the following limitations (relative to the subsequence):

    * the maximum allowed number of character substitutions
    * the maximum allowed number of new characters inserted
    * no deletions are allowed
    * the total number of substitutions, insertions and deletions
    """
    if not subsequence:
        raise ValueError('Given subsequence is empty!')

    max_substitutions, max_insertions, max_deletions, max_l_dist = search_params.unpacked
    max_substitutions = min(max_substitutions, max_l_dist)
    max_insertions = min(max_insertions, max_l_dist)

    subseq_len = len(subsequence)
    seq_len = len(sequence)

    ngram_len = subseq_len // (max_substitutions + max_insertions + 1)
    if ngram_len == 0:
        raise ValueError(
            "The subsequence's length must be greater than max_subs + max_ins!"
        )

    matches = []
    matched_indexes = set()

    for ngram_start in range(0, len(subsequence) - ngram_len + 1, ngram_len):
        ngram_end = ngram_start + ngram_len
        subseq_before = subsequence[:ngram_start]
        subseq_before_reversed = subseq_before[::-1]
        subseq_after = subsequence[ngram_end:]
        start_index = max(0, ngram_start - max_insertions)
        end_index = min(seq_len, seq_len - (subseq_len - ngram_end) + max_insertions)
        for index in search_exact(
                subsequence[ngram_start:ngram_end], sequence,
                start_index, end_index,
        ):
            if index - ngram_start in matched_indexes:
                continue

            seq_after = sequence[index + ngram_len:index + subseq_len - ngram_start + max_insertions]
            if seq_after.startswith(subseq_after):
                matches_after = [(0, 0)]
            else:
                matches_after = _expand(subseq_after, seq_after,
                                        max_substitutions, max_insertions,
                                        max_l_dist)
            if not matches_after:
                continue

            _max_substitutions = max_substitutions - min(m[0] for m in matches_after)
            _max_insertions = max_insertions - min(m[1] for m in matches_after)
            _max_l_dist = max_l_dist - min(m[0] + m[1] for m in matches_after)

            seq_before = sequence[index - ngram_start - _max_insertions:index]
            if seq_before.endswith(subseq_before):
                matches_before = [(0, 0)]
            else:
                matches_before = _expand(
                    subseq_before_reversed, seq_before[::-1],
                    _max_substitutions, _max_insertions, _max_l_dist,
                )

            for (subs_before, ins_before) in matches_before:
                for (subs_after, ins_after) in matches_after:
                    if (
                            subs_before + subs_after <= max_substitutions and
                            ins_before + ins_after <= max_insertions and
                            subs_before + subs_after + ins_before + ins_after <= max_l_dist
                    ):
                        matches.append(Match(
                            start=index - ngram_start - ins_before,
                            end=index - ngram_start + subseq_len + ins_after,
                            dist=subs_before + subs_after + ins_before + ins_after,
                        ))
                        matched_indexes |= set(range(
                            index - ngram_start - ins_before,
                            index - ngram_start - ins_before + max_insertions + 1,
                        ))

    return sorted(matches, key=lambda match: match.start)
search for near-matches of subsequence in sequence This searches for near-matches, where the nearly-matching parts of the sequence must meet the following limitations (relative to the subsequence): * the maximum allowed number of character substitutions * the maximum allowed number of new characters inserted * no deletions are allowed * the total number of substitutions, insertions and deletions
https://github.com/taleinat/fuzzysearch/blob/04be1b4490de92601400be5ecc999003ff2f621f/src/fuzzysearch/no_deletions.py#L41-L125
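The n-gram length computed above follows from a pigeonhole argument: with at most max_subs + max_ins edits, at least one of max_subs + max_ins + 1 equal-length slices of the subsequence must occur in the sequence verbatim. A tiny stdlib-only illustration (values chosen for the example):

subsequence = 'PATTERN'
max_subs, max_ins = 1, 1
ngram_len = len(subsequence) // (max_subs + max_ins + 1)
print(ngram_len)  # 2: search exactly for 'PA', 'TT', 'ER', then expand around hits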
taleinat/fuzzysearch
src/fuzzysearch/__init__.py
find_near_matches
python
def find_near_matches(subsequence, sequence,
                      max_substitutions=None,
                      max_insertions=None,
                      max_deletions=None,
                      max_l_dist=None):
    """search for near-matches of subsequence in sequence

    This searches for near-matches, where the nearly-matching parts of the
    sequence must meet the following limitations (relative to the subsequence):

    * the maximum allowed number of character substitutions
    * the maximum allowed number of new characters inserted
    * and the maximum allowed number of character deletions
    * the total number of substitutions, insertions and deletions
      (a.k.a. the Levenshtein distance)
    """
    search_params = LevenshteinSearchParams(max_substitutions,
                                            max_insertions,
                                            max_deletions,
                                            max_l_dist)
    search_func = choose_search_func(search_params)
    return search_func(subsequence, sequence, search_params)
search for near-matches of subsequence in sequence This searches for near-matches, where the nearly-matching parts of the sequence must meet the following limitations (relative to the subsequence): * the maximum allowed number of character substitutions * the maximum allowed number of new characters inserted * and the maximum allowed number of character deletions * the total number of substitutions, insertions and deletions (a.k.a. the Levenshtein distance)
https://github.com/taleinat/fuzzysearch/blob/04be1b4490de92601400be5ecc999003ff2f621f/src/fuzzysearch/__init__.py#L30-L51
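Typical usage, following the project's README; a single max_l_dist caps the total number of edits:

from fuzzysearch import find_near_matches

matches = find_near_matches('PATTERN', '---PATERN---', max_l_dist=1)
print(matches)  # should print something like [Match(start=3, end=9, dist=1)]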
taleinat/fuzzysearch
src/fuzzysearch/substitutions_only.py
find_near_matches_substitutions
python
def find_near_matches_substitutions(subsequence, sequence, max_substitutions):
    """Find near-matches of the subsequence in the sequence.

    This chooses a suitable fuzzy search implementation according to the given
    parameters.

    Returns a list of fuzzysearch.Match objects describing the matching parts
    of the sequence.
    """
    _check_arguments(subsequence, sequence, max_substitutions)

    if max_substitutions == 0:
        return [
            Match(start_index, start_index + len(subsequence), 0)
            for start_index in search_exact(subsequence, sequence)
        ]

    elif len(subsequence) // (max_substitutions + 1) >= 3:
        return find_near_matches_substitutions_ngrams(
            subsequence, sequence, max_substitutions,
        )

    else:
        return find_near_matches_substitutions_lp(
            subsequence, sequence, max_substitutions,
        )
Find near-matches of the subsequence in the sequence. This chooses a suitable fuzzy search implementation according to the given parameters. Returns a list of fuzzysearch.Match objects describing the matching parts of the sequence.
https://github.com/taleinat/fuzzysearch/blob/04be1b4490de92601400be5ecc999003ff2f621f/src/fuzzysearch/substitutions_only.py#L37-L62
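A hedged usage sketch; since only substitutions are allowed, every match keeps the subsequence's exact length:

from fuzzysearch.substitutions_only import find_near_matches_substitutions

matches = find_near_matches_substitutions('pattern', 'xx paXtern xx', 1)
print(matches)  # expect a single Match of length 7 with dist=1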
taleinat/fuzzysearch
src/fuzzysearch/substitutions_only.py
find_near_matches_substitutions_lp
python
def find_near_matches_substitutions_lp(subsequence, sequence,
                                       max_substitutions):
    """search for near-matches of subsequence in sequence

    This searches for near-matches, where the nearly-matching parts of the
    sequence must meet the following limitations (relative to the subsequence):

    * the number of character substitutions must be less than max_substitutions
    * no deletions or insertions are allowed
    """
    _check_arguments(subsequence, sequence, max_substitutions)
    return list(_find_near_matches_substitutions_lp(subsequence, sequence,
                                                    max_substitutions))
search for near-matches of subsequence in sequence This searches for near-matches, where the nearly-matching parts of the sequence must meet the following limitations (relative to the subsequence): * the number of character substitutions must be less than max_substitutions * no deletions or insertions are allowed
https://github.com/taleinat/fuzzysearch/blob/04be1b4490de92601400be5ecc999003ff2f621f/src/fuzzysearch/substitutions_only.py#L65-L78
taleinat/fuzzysearch
src/fuzzysearch/substitutions_only.py
find_near_matches_substitutions_ngrams
python
def find_near_matches_substitutions_ngrams(subsequence, sequence,
                                           max_substitutions):
    """search for near-matches of subsequence in sequence

    This searches for near-matches, where the nearly-matching parts of the
    sequence must meet the following limitations (relative to the subsequence):

    * the number of character substitutions must be less than max_substitutions
    * no deletions or insertions are allowed
    """
    _check_arguments(subsequence, sequence, max_substitutions)

    match_starts = set()
    matches = []
    for match in _find_near_matches_substitutions_ngrams(subsequence, sequence,
                                                         max_substitutions):
        if match.start not in match_starts:
            match_starts.add(match.start)
            matches.append(match)
    return sorted(matches, key=lambda match: match.start)
search for near-matches of subsequence in sequence This searches for near-matches, where the nearly-matching parts of the sequence must meet the following limitations (relative to the subsequence): * the number of character substitutions must be less than max_substitutions * no deletions or insertions are allowed
https://github.com/taleinat/fuzzysearch/blob/04be1b4490de92601400be5ecc999003ff2f621f/src/fuzzysearch/substitutions_only.py#L144-L163
taleinat/fuzzysearch
src/fuzzysearch/substitutions_only.py
has_near_match_substitutions_ngrams
python
def has_near_match_substitutions_ngrams(subsequence, sequence,
                                        max_substitutions):
    """search for near-matches of subsequence in sequence

    This searches for near-matches, where the nearly-matching parts of the
    sequence must meet the following limitations (relative to the subsequence):

    * the number of character substitutions must be less than max_substitutions
    * no deletions or insertions are allowed
    """
    _check_arguments(subsequence, sequence, max_substitutions)

    for match in _find_near_matches_substitutions_ngrams(subsequence, sequence,
                                                         max_substitutions):
        return True
    return False
search for near-matches of subsequence in sequence This searches for near-matches, where the nearly-matching parts of the sequence must meet the following limitations (relative to the subsequence): * the number of character substitutions must be less than max_substitutions * no deletions or insertions are allowed
https://github.com/taleinat/fuzzysearch/blob/04be1b4490de92601400be5ecc999003ff2f621f/src/fuzzysearch/substitutions_only.py#L211-L226
taleinat/fuzzysearch
src/fuzzysearch/generic_search.py
find_near_matches_generic
python
def find_near_matches_generic(subsequence, sequence, search_params):
    """search for near-matches of subsequence in sequence

    This searches for near-matches, where the nearly-matching parts of the
    sequence must meet the following limitations (relative to the subsequence):

    * the maximum allowed number of character substitutions
    * the maximum allowed number of new characters inserted
    * and the maximum allowed number of character deletions
    * the total number of substitutions, insertions and deletions
    """
    if not subsequence:
        raise ValueError('Given subsequence is empty!')

    # if the limitations are so strict that only exact matches are allowed,
    # use search_exact()
    if search_params.max_l_dist == 0:
        return [
            Match(start_index, start_index + len(subsequence), 0)
            for start_index in search_exact(subsequence, sequence)
        ]

    # if the n-gram length would be at least 3, use the n-gram search method
    elif len(subsequence) // (search_params.max_l_dist + 1) >= 3:
        return find_near_matches_generic_ngrams(subsequence, sequence, search_params)

    # use the linear programming search method
    else:
        matches = find_near_matches_generic_linear_programming(subsequence, sequence, search_params)

        match_groups = group_matches(matches)
        best_matches = [get_best_match_in_group(group) for group in match_groups]
        return sorted(best_matches)
search for near-matches of subsequence in sequence This searches for near-matches, where the nearly-matching parts of the sequence must meet the following limitations (relative to the subsequence): * the maximum allowed number of character substitutions * the maximum allowed number of new characters inserted * and the maximum allowed number of character deletions * the total number of substitutions, insertions and deletions
https://github.com/taleinat/fuzzysearch/blob/04be1b4490de92601400be5ecc999003ff2f621f/src/fuzzysearch/generic_search.py#L24-L56
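A sketch of calling the generic dispatcher directly. It assumes LevenshteinSearchParams lives in fuzzysearch.common and accepts these keyword arguments; both are inferred from the surrounding code, not confirmed here:

from fuzzysearch.common import LevenshteinSearchParams  # assumed location
from fuzzysearch.generic_search import find_near_matches_generic

params = LevenshteinSearchParams(max_substitutions=1, max_insertions=1,
                                 max_deletions=1, max_l_dist=1)
print(find_near_matches_generic('PATTERN', '---PATERN---', params))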
taleinat/fuzzysearch
src/fuzzysearch/generic_search.py
_find_near_matches_generic_linear_programming
python
def _find_near_matches_generic_linear_programming(subsequence, sequence,
                                                  search_params):
    """search for near-matches of subsequence in sequence

    This searches for near-matches, where the nearly-matching parts of the
    sequence must meet the following limitations (relative to the subsequence):

    * the maximum allowed number of character substitutions
    * the maximum allowed number of new characters inserted
    * and the maximum allowed number of character deletions
    * the total number of substitutions, insertions and deletions
    """
    if not subsequence:
        raise ValueError('Given subsequence is empty!')

    max_substitutions, max_insertions, max_deletions, max_l_dist = search_params.unpacked

    # optimization: prepare some often used things in advance
    subseq_len = len(subsequence)

    candidates = []
    for index, char in enumerate(sequence):
        candidates.append(GenericSearchCandidate(index, 0, 0, 0, 0, 0))
        new_candidates = []

        for cand in candidates:
            # if this sequence char is the candidate's next expected char
            if char == subsequence[cand.subseq_index]:
                # if reached the end of the subsequence, return a match
                if cand.subseq_index + 1 == subseq_len:
                    yield Match(cand.start, index + 1, cand.l_dist)
                # otherwise, update the candidate's subseq_index and keep it
                else:
                    new_candidates.append(cand._replace(
                        subseq_index=cand.subseq_index + 1,
                    ))

            # if this sequence char is *not* the candidate's next expected char
            else:
                # we can try skipping a sequence or sub-sequence char (or both),
                # unless this candidate has already skipped the maximum allowed
                # number of characters
                if cand.l_dist == max_l_dist:
                    continue

                if cand.n_ins < max_insertions:
                    # add a candidate skipping a sequence char
                    new_candidates.append(cand._replace(
                        n_ins=cand.n_ins + 1,
                        l_dist=cand.l_dist + 1,
                    ))

                if cand.subseq_index + 1 < subseq_len:
                    if cand.n_subs < max_substitutions:
                        # add a candidate skipping both a sequence char and a
                        # subsequence char
                        new_candidates.append(cand._replace(
                            n_subs=cand.n_subs + 1,
                            subseq_index=cand.subseq_index + 1,
                            l_dist=cand.l_dist + 1,
                        ))
                    elif cand.n_dels < max_deletions and cand.n_ins < max_insertions:
                        # add a candidate skipping both a sequence char and a
                        # subsequence char
                        new_candidates.append(cand._replace(
                            n_ins=cand.n_ins + 1,
                            n_dels=cand.n_dels + 1,
                            subseq_index=cand.subseq_index + 1,
                            l_dist=cand.l_dist + 1,
                        ))
                else:  # cand.subseq_index == _subseq_len - 1
                    if (
                            cand.n_subs < max_substitutions or
                            (
                                cand.n_dels < max_deletions and
                                cand.n_ins < max_insertions
                            )
                    ):
                        yield Match(cand.start, index + 1, cand.l_dist + 1)

                # try skipping subsequence chars
                for n_skipped in xrange(1, min(max_deletions - cand.n_dels,
                                               max_l_dist - cand.l_dist) + 1):
                    # if skipping n_dels sub-sequence chars reaches the end
                    # of the sub-sequence, yield a match
                    if cand.subseq_index + n_skipped == subseq_len:
                        yield Match(cand.start, index + 1,
                                    cand.l_dist + n_skipped)
                        break
                    # otherwise, if skipping n_skipped sub-sequence chars
                    # reaches a sub-sequence char identical to this sequence
                    # char ...
                    elif subsequence[cand.subseq_index + n_skipped] == char:
                        # if this is the last char of the sub-sequence, yield
                        # a match
                        if cand.subseq_index + n_skipped + 1 == subseq_len:
                            yield Match(cand.start, index + 1,
                                        cand.l_dist + n_skipped)
                        # otherwise add a candidate skipping n_skipped
                        # subsequence chars
                        else:
                            new_candidates.append(cand._replace(
                                n_dels=cand.n_dels + n_skipped,
                                subseq_index=cand.subseq_index + 1 + n_skipped,
                                l_dist=cand.l_dist + n_skipped,
                            ))
                        break
                # note: if the above loop ends without a break, that means that
                # no candidate could be added / yielded by skipping sub-sequence
                # chars

        candidates = new_candidates

    for cand in candidates:
        # note: index + 1 == length(sequence)
        n_skipped = subseq_len - cand.subseq_index
        if cand.n_dels + n_skipped <= max_deletions and \
                cand.l_dist + n_skipped <= max_l_dist:
            yield Match(cand.start, index + 1, cand.l_dist + n_skipped)
search for near-matches of subsequence in sequence This searches for near-matches, where the nearly-matching parts of the sequence must meet the following limitations (relative to the subsequence): * the maximum allowed number of character substitutions * the maximum allowed number of new characters inserted * and the maximum allowed number of character deletions * the total number of substitutions, insertions and deletions
https://github.com/taleinat/fuzzysearch/blob/04be1b4490de92601400be5ecc999003ff2f621f/src/fuzzysearch/generic_search.py#L59-L176
taleinat/fuzzysearch
src/fuzzysearch/generic_search.py
find_near_matches_generic_ngrams
python
def find_near_matches_generic_ngrams(subsequence, sequence, search_params):
    """search for near-matches of subsequence in sequence

    This searches for near-matches, where the nearly-matching parts of the
    sequence must meet the following limitations (relative to the subsequence):

    * the maximum allowed number of character substitutions
    * the maximum allowed number of new characters inserted
    * and the maximum allowed number of character deletions
    * the total number of substitutions, insertions and deletions
    """
    if not subsequence:
        raise ValueError('Given subsequence is empty!')

    matches = list(_find_near_matches_generic_ngrams(subsequence, sequence, search_params))

    # don't return overlapping matches; instead, group overlapping matches
    # together and return the best match from each group
    match_groups = group_matches(matches)
    best_matches = [get_best_match_in_group(group) for group in match_groups]
    return sorted(best_matches)
search for near-matches of subsequence in sequence This searches for near-matches, where the nearly-matching parts of the sequence must meet the following limitations (relative to the subsequence): * the maximum allowed number of character substitutions * the maximum allowed number of new characters inserted * and the maximum allowed number of character deletions * the total number of substitutions, insertions and deletions
https://github.com/taleinat/fuzzysearch/blob/04be1b4490de92601400be5ecc999003ff2f621f/src/fuzzysearch/generic_search.py#L202-L222
taleinat/fuzzysearch
src/fuzzysearch/generic_search.py
has_near_match_generic_ngrams
python
def has_near_match_generic_ngrams(subsequence, sequence, search_params):
    """search for near-matches of subsequence in sequence

    This searches for near-matches, where the nearly-matching parts of the
    sequence must meet the following limitations (relative to the subsequence):

    * the maximum allowed number of character substitutions
    * the maximum allowed number of new characters inserted
    * and the maximum allowed number of character deletions
    * the total number of substitutions, insertions and deletions
    """
    if not subsequence:
        raise ValueError('Given subsequence is empty!')

    for match in _find_near_matches_generic_ngrams(subsequence, sequence, search_params):
        return True
    return False
search for near-matches of subsequence in sequence This searches for near-matches, where the nearly-matching parts of the sequence must meet the following limitations (relative to the subsequence): * the maximum allowed number of character substitutions * the maximum allowed number of new characters inserted * and the maximum allowed number of character deletions * the total number of substitutions, insertions and deletions
https://github.com/taleinat/fuzzysearch/blob/04be1b4490de92601400be5ecc999003ff2f621f/src/fuzzysearch/generic_search.py#L252-L268
taleinat/fuzzysearch
src/fuzzysearch/levenshtein_ngram.py
_expand
python
def _expand(subsequence, sequence, max_l_dist):
    """Expand a partial match of a Levenshtein search.

    An expansion must begin at the beginning of the sequence, which makes
    this much simpler than a full search, and allows for greater optimization.
    """
    # If given a long sub-sequence and relatively small max distance,
    # use a more complex algorithm better optimized for such cases.
    if len(subsequence) > max(max_l_dist * 2, 10):
        return _expand_long(subsequence, sequence, max_l_dist)
    else:
        return _expand_short(subsequence, sequence, max_l_dist)
Expand a partial match of a Levenshtein search. An expansion must begin at the beginning of the sequence, which makes this much simpler than a full search, and allows for greater optimization.
https://github.com/taleinat/fuzzysearch/blob/04be1b4490de92601400be5ecc999003ff2f621f/src/fuzzysearch/levenshtein_ngram.py#L9-L20
taleinat/fuzzysearch
src/fuzzysearch/levenshtein_ngram.py
_py_expand_short
python
def _py_expand_short(subsequence, sequence, max_l_dist):
    """Straightforward implementation of partial match expansion."""
    # The following diagram shows the score calculation step.
    #
    # Each new score is the minimum of:
    #  * a OR a + 1 (substitution, if needed)
    #  * b + 1 (deletion, i.e. skipping a sequence character)
    #  * c + 1 (insertion, i.e. skipping a sub-sequence character)
    #
    # a -- +1 -> c
    #
    # |  \        |
    # |   \       |
    # +1  +1?     +1
    # |     \     |
    # v      ⌟    v
    #
    # b -- +1 -> scores[subseq_index]
    subseq_len = len(subsequence)
    if subseq_len == 0:
        return (0, 0)

    # Initialize the scores array with values for just skipping sub-sequence
    # chars.
    scores = list(range(1, subseq_len + 1))

    min_score = subseq_len
    min_score_idx = -1

    for seq_index, char in enumerate(sequence):
        # calculate scores, one for each character in the sub-sequence
        a = seq_index
        c = a + 1
        for subseq_index in range(subseq_len):
            b = scores[subseq_index]
            c = scores[subseq_index] = min(
                a + (char != subsequence[subseq_index]),
                b + 1,
                c + 1,
            )
            a = b

        # keep the minimum score found for matches of the entire sub-sequence
        if c <= min_score:
            min_score = c
            min_score_idx = seq_index

        # bail early when it is impossible to find a better expansion
        elif min(scores) >= min_score:
            break

    return (min_score, min_score_idx + 1) if min_score <= max_l_dist else (None, None)
Straightforward implementation of partial match expansion.
https://github.com/taleinat/fuzzysearch/blob/04be1b4490de92601400be5ecc999003ff2f621f/src/fuzzysearch/levenshtein_ngram.py#L23-L75
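A tiny worked call, hand-checked against the scoring loop above: expanding 'abc' over 'axc' costs one substitution and consumes three sequence characters. This imports a module-private helper directly, which is fine for experimentation:

from fuzzysearch.levenshtein_ngram import _py_expand_short

print(_py_expand_short('abc', 'axc', max_l_dist=1))  # (1, 3)
print(_py_expand_short('abc', 'xyz', max_l_dist=1))  # (None, None): too far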
taleinat/fuzzysearch
src/fuzzysearch/levenshtein_ngram.py
_py_expand_long
python
def _py_expand_long(subsequence, sequence, max_l_dist):
    """Partial match expansion, optimized for long sub-sequences."""
    # The additional optimization in this version is to limit the part of
    # the sub-sequence inspected for each sequence character.  The start and
    # end of the iteration are limited to the range where the scores are
    # smaller than the maximum allowed distance.  Additionally, once a good
    # expansion has been found, the range is further reduced to where the
    # scores are smaller than the score of the best expansion found so far.
    subseq_len = len(subsequence)
    if subseq_len == 0:
        return (0, 0)

    # Initialize the scores array with values for just skipping sub-sequence
    # chars.
    scores = list(range(1, subseq_len + 1))

    min_score = subseq_len
    min_score_idx = -1
    max_good_score = max_l_dist
    new_needle_idx_range_start = 0
    new_needle_idx_range_end = subseq_len - 1

    for seq_index, char in enumerate(sequence):
        # calculate scores, one for each character in the sub-sequence
        needle_idx_range_start = new_needle_idx_range_start
        needle_idx_range_end = min(subseq_len, new_needle_idx_range_end + 1)

        a = seq_index
        c = a + 1

        if c <= max_good_score:
            new_needle_idx_range_start = 0
            new_needle_idx_range_end = 0
        else:
            new_needle_idx_range_start = None
            new_needle_idx_range_end = -1

        for subseq_index in range(needle_idx_range_start, needle_idx_range_end):
            b = scores[subseq_index]
            c = scores[subseq_index] = min(
                a + (char != subsequence[subseq_index]),
                b + 1,
                c + 1,
            )
            a = b

            if c <= max_good_score:
                if new_needle_idx_range_start is None:
                    new_needle_idx_range_start = subseq_index
                new_needle_idx_range_end = max(
                    new_needle_idx_range_end,
                    subseq_index + 1 + (max_good_score - c),
                )

        # bail early when it is impossible to find a better expansion
        if new_needle_idx_range_start is None:
            break

        # keep the minimum score found for matches of the entire sub-sequence
        if needle_idx_range_end == subseq_len and c <= min_score:
            min_score = c
            min_score_idx = seq_index
            if min_score < max_good_score:
                max_good_score = min_score

    return (min_score, min_score_idx + 1) if min_score <= max_l_dist else (None, None)
Partial match expansion, optimized for long sub-sequences.
https://github.com/taleinat/fuzzysearch/blob/04be1b4490de92601400be5ecc999003ff2f621f/src/fuzzysearch/levenshtein_ngram.py#L78-L144
taleinat/fuzzysearch
src/fuzzysearch/levenshtein.py
find_near_matches_levenshtein
python
def find_near_matches_levenshtein(subsequence, sequence, max_l_dist):
    """Find near-matches of the subsequence in the sequence.

    This chooses a suitable fuzzy search implementation according to the given
    parameters.

    Returns a list of fuzzysearch.Match objects describing the matching parts
    of the sequence.
    """
    if not subsequence:
        raise ValueError('Given subsequence is empty!')
    if max_l_dist < 0:
        raise ValueError('Maximum Levenshtein distance must be >= 0!')

    if max_l_dist == 0:
        return [
            Match(start_index, start_index + len(subsequence), 0)
            for start_index in search_exact(subsequence, sequence)
        ]

    elif len(subsequence) // (max_l_dist + 1) >= 3:
        return find_near_matches_levenshtein_ngrams(subsequence,
                                                    sequence,
                                                    max_l_dist)

    else:
        matches = find_near_matches_levenshtein_linear_programming(subsequence,
                                                                   sequence,
                                                                   max_l_dist)
        match_groups = group_matches(matches)
        best_matches = [get_best_match_in_group(group) for group in match_groups]
        return sorted(best_matches)
Find near-matches of the subsequence in the sequence. This chooses a suitable fuzzy search implementation according to the given parameters. Returns a list of fuzzysearch.Match objects describing the matching parts of the sequence.
https://github.com/taleinat/fuzzysearch/blob/04be1b4490de92601400be5ecc999003ff2f621f/src/fuzzysearch/levenshtein.py#L9-L40
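A hedged usage sketch for the function above. At this commit, Match is a namedtuple with start, end and dist fields, so the printed form may differ between fuzzysearch versions:

from fuzzysearch.levenshtein import find_near_matches_levenshtein

# 'PATERN' is one deletion away from 'PATTERN', so it matches with max_l_dist=1.
print(find_near_matches_levenshtein('PATTERN', '---PATERN---', max_l_dist=1))
# e.g. [Match(start=3, end=9, dist=1)]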
abw333/dominoes
dominoes/game.py
_randomized_hands
def _randomized_hands():
    '''
    :return: 4 hands, obtained by shuffling the 28 dominoes used in this
             variation of the game, and distributing them evenly
    '''
    all_dominoes = [dominoes.Domino(i, j) for i in range(7) for j in range(i, 7)]
    random.shuffle(all_dominoes)
    return [dominoes.Hand(all_dominoes[0:7]), dominoes.Hand(all_dominoes[7:14]),
            dominoes.Hand(all_dominoes[14:21]), dominoes.Hand(all_dominoes[21:28])]
python
def _randomized_hands():
    '''
    :return: 4 hands, obtained by shuffling the 28 dominoes used in this
             variation of the game, and distributing them evenly
    '''
    all_dominoes = [dominoes.Domino(i, j) for i in range(7) for j in range(i, 7)]
    random.shuffle(all_dominoes)
    return [dominoes.Hand(all_dominoes[0:7]), dominoes.Hand(all_dominoes[7:14]),
            dominoes.Hand(all_dominoes[14:21]), dominoes.Hand(all_dominoes[21:28])]
:return: 4 hands, obtained by shuffling the 28 dominoes used in this variation of the game, and distributing them evenly
https://github.com/abw333/dominoes/blob/ea9f532c9b834117a5c07d214711515872f7537e/dominoes/game.py#L6-L14
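A sketch of how this helper's output surfaces through the public API; _randomized_hands is module-private, and the assumption here is that dominoes.Game.new deals hands via it:

import dominoes

# Game.new is assumed to call _randomized_hands internally to deal the hands.
game = dominoes.Game.new()
for player, hand in enumerate(game.hands):
    print(player, hand)  # each of the 4 hands holds 7 of the 28 dominoes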
abw333/dominoes
dominoes/game.py
_validate_player
def _validate_player(player):
    '''
    Checks that a player is a valid player. Valid players are: 0, 1, 2, and 3.

    :param int player: player to be validated
    :return: None
    :raises NoSuchPlayerException: if the player is invalid
    '''
    valid_players = range(4)
    if player not in valid_players:
        valid_players = ', '.join(str(p) for p in valid_players)
        raise dominoes.NoSuchPlayerException('{} is not a valid player. Valid players'
                                             ' are: {}'.format(player, valid_players))
python
def _validate_player(player):
    '''
    Checks that a player is a valid player. Valid players are: 0, 1, 2, and 3.

    :param int player: player to be validated
    :return: None
    :raises NoSuchPlayerException: if the player is invalid
    '''
    valid_players = range(4)
    if player not in valid_players:
        valid_players = ', '.join(str(p) for p in valid_players)
        raise dominoes.NoSuchPlayerException('{} is not a valid player. Valid players'
                                             ' are: {}'.format(player, valid_players))
Checks that a player is a valid player. Valid players are: 0, 1, 2, and 3. :param int player: player to be validated :return: None :raises NoSuchPlayerException: if the player is invalid
https://github.com/abw333/dominoes/blob/ea9f532c9b834117a5c07d214711515872f7537e/dominoes/game.py#L16-L28
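A sketch of the exception path. The exact public call sites are an assumption; Game.new's starting_player argument appears to be guarded by this helper:

import dominoes

try:
    # starting_player must be 0-3; assumed to be validated via _validate_player.
    dominoes.Game.new(starting_player=7)
except dominoes.NoSuchPlayerException as error:
    print(error)  # 7 is not a valid player. Valid players are: 0, 1, 2, 3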
abw333/dominoes
dominoes/game.py
_domino_hand
def _domino_hand(d, hands):
    '''
    :param Domino d: domino to find within the hands
    :param list hands: hands to find domino in
    :return: index of the hand that contains the specified domino
    :raises NoSuchDominoException: if no hand contains the specified domino
    '''
    for i, hand in enumerate(hands):
        if d in hand:
            return i

    raise dominoes.NoSuchDominoException('{} is not in any hand!'.format(d))
python
def _domino_hand(d, hands):
    '''
    :param Domino d: domino to find within the hands
    :param list hands: hands to find domino in
    :return: index of the hand that contains the specified domino
    :raises NoSuchDominoException: if no hand contains the specified domino
    '''
    for i, hand in enumerate(hands):
        if d in hand:
            return i

    raise dominoes.NoSuchDominoException('{} is not in any hand!'.format(d))
:param Domino d: domino to find within the hands :param list hands: hands to find domino in :return: index of the hand that contains the specified domino :raises NoSuchDominoException: if no hand contains the specified domino
https://github.com/abw333/dominoes/blob/ea9f532c9b834117a5c07d214711515872f7537e/dominoes/game.py#L30-L41
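A direct-call sketch, illustrative only since _domino_hand is module-private; the import path assumes the dominoes/game.py layout shown above:

import dominoes
from dominoes.game import _domino_hand  # private helper, imported for illustration

hands = [dominoes.Hand([dominoes.Domino(0, 0)]),
         dominoes.Hand([dominoes.Domino(1, 1)])]
print(_domino_hand(dominoes.Domino(1, 1), hands))  # 1
# _domino_hand(dominoes.Domino(6, 6), hands) would raise NoSuchDominoException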