Code
stringlengths 103
85.9k
| Summary
sequencelengths 0
94
|
---|---|
def new(self, log_block_size):
    # type: (int) -> None
    '''
    Create a new Version Volume Descriptor.

    Parameters:
     log_block_size - The size of one extent.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('This Version Volume Descriptor is already initialized')
    # The version volume descriptor carries no payload; it is simply one
    # zero-filled logical block.
    self._data = log_block_size * b'\x00'
    self._initialized = True
def hdmbrcheck(disk_mbr, sector_count, bootable):
    # type: (bytes, int, bool) -> int
    '''
    Sanity check an El Torito Hard Drive Master Boot Record (HDMBR).  On
    success, return the system_type (also known as the partition type) that
    should be fed into the rest of the El Torito methods.  On failure, raise
    an exception.

    Parameters:
     disk_mbr - The data to look in.
     sector_count - The number of sectors expected in the MBR.
     bootable - Whether this MBR is bootable.
    Returns:
     The system (or partition) type the should be fed into the rest of El Torito.
    '''
    # For hd emulation boot, El Torito wants a standard x86 MBR, documented at
    # https://en.wikipedia.org/wiki/Master_boot_record#Sector_layout
    #
    # 512 bytes: 446 bytes of bootstrap code, four 16-byte partition entries,
    # then the 0x55 0xAA signature.  Each partition entry is:
    #  0x0:       status (0x80 = active, 0x00 = inactive)
    #  0x1 - 0x3: CHS of first sector (head; sector in bits 0-5 with cylinder
    #             high bits in 6-7; cylinder low byte)
    #  0x4:       partition type
    #  0x5 - 0x7: CHS of last sector (same layout as the first)
    #  0x8 - 0xB: LBA of first sector
    #  0xC - 0xF: number of sectors
    unused_type = 0x0
    active_flag = 0x80
    fields = struct.unpack_from('=446s16s16s16s16sBB', disk_mbr, 0)
    if fields[5] != 0x55 or fields[6] != 0xAA:
        raise pycdlibexception.PyCdlibInvalidInput('Invalid magic on HD MBR')
    system_type = unused_type
    for raw_part in fields[1:5]:
        (status, start_head, start_seccyl, start_cyl, ptype, end_head,
         end_seccyl, end_cyl, lba_unused,
         num_sectors_unused) = struct.unpack('=BBBBBBBBLL', raw_part)
        if ptype == unused_type:
            continue
        if system_type != unused_type:
            raise pycdlibexception.PyCdlibInvalidInput('Boot image has multiple partitions')
        if bootable and status != active_flag:
            # genisoimage prints a warning in this case, but we have no other
            # warning prints in the whole codebase, and an exception will probably
            # make us too fragile. So we leave the code but don't do anything.
            with open(os.devnull, 'w') as devnull:
                print('Warning: partition not marked active', file=devnull)
        start_cylinder = ((start_seccyl & 0xC0) << 10) | start_cyl
        start_sector = start_seccyl & 0x3f
        if start_cylinder != 0 or start_head != 1 or start_sector != 1:
            # genisoimage prints a warning in this case, but we have no other
            # warning prints in the whole codebase, and an exception will probably
            # make us too fragile. So we leave the code but don't do anything.
            with open(os.devnull, 'w') as devnull:
                print('Warning: partition does not start at 0/1/1', file=devnull)
        end_cylinder = ((end_seccyl & 0xC0) << 10) | end_cyl
        end_sector = end_seccyl & 0x3f
        if sector_count != (end_cylinder + 1) * (end_head + 1) * end_sector:
            # genisoimage prints a warning in this case, but we have no other
            # warning prints in the whole codebase, and an exception will probably
            # make us too fragile. So we leave the code but don't do anything.
            with open(os.devnull, 'w') as devnull:
                print('Warning: image size does not match geometry', file=devnull)
        system_type = ptype
    if system_type == unused_type:
        raise pycdlibexception.PyCdlibInvalidInput('Boot image has no partitions')
    return system_type
def parse(self, vd, datastr, ino):
    # type: (headervd.PrimaryOrSupplementaryVD, bytes, inode.Inode) -> bool
    '''
    Parse a boot info table out of a string.

    Parameters:
     vd - The Volume Descriptor associated with this Boot Info Table.
     datastr - The string to parse the boot info table out of.
     ino - The Inode associated with the boot file.
    Returns:
     True if this is a valid El Torito Boot Info Table, False otherwise.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('This Eltorito Boot Info Table is already initialized')
    (pvd_extent, rec_extent, orig_len, csum) = struct.unpack_from('=LLLL', datastr, 0)
    self.orig_len = orig_len
    self.csum = csum
    # Validity check: the recorded PVD and boot-file extents must match the
    # actual locations of the volume descriptor and the boot file's inode.
    if pvd_extent != vd.extent_location() or rec_extent != ino.extent_location():
        return False
    self.vd = vd
    self.inode = ino
    self._initialized = True
    return True
def new(self, vd, ino, orig_len, csum):
    # type: (headervd.PrimaryOrSupplementaryVD, inode.Inode, int, int) -> None
    '''
    Create a new boot info table.

    Parameters:
     vd - The volume descriptor to associate with this boot info table.
     ino - The Inode associated with this Boot Info Table.
     orig_len - The original length of the file before the boot info table was patched into it.
     csum - The checksum for the boot file, starting at the byte after the boot info table.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('This Eltorito Boot Info Table is already initialized')
    self.inode = ino
    self.vd = vd
    self.csum = csum
    self.orig_len = orig_len
    self._initialized = True
def record(self):
    # type: () -> bytes
    '''
    Generate a string representing this boot info table.

    Parameters:
     None.
    Returns:
     A string representing this boot info table.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('This Eltorito Boot Info Table not yet initialized')
    # 16 bytes of data, padded with 40 zero bytes (the table occupies 56
    # bytes in the boot file).
    header = struct.pack('=LLLL', self.vd.extent_location(),
                         self.inode.extent_location(), self.orig_len,
                         self.csum)
    return header + b'\x00' * 40
def _checksum(data):
    # type: (bytes) -> int
    '''
    A static method to compute the checksum on the ISO.  Note that this is
    *not* a 1's complement checksum; when an addition overflows, the carry
    bit is discarded, not added to the end.

    Parameters:
     data - The data to compute the checksum over.  Expected to have an
            even number of bytes; an odd-length input raises IndexError.
    Returns:
     The checksum of the data.
    '''
    def identity(x):
        # type: (int) -> int
        '''
        The identity function so we can use a function for python2/3
        compatibility.
        '''
        return x
    if isinstance(data, str):
        # Python 2 str indexing yields one-character strings; convert them
        # to integers with ord().
        myord = ord
    else:
        # bytes (Python 3) and bytearray indexing already yield ints.  This
        # was previously 'elif isinstance(data, bytes)', which left myord
        # unbound (NameError) for any other byte-like input such as
        # bytearray.
        myord = identity
    s = 0
    # Sum the data as little-endian 16-bit words, discarding carries.
    for i in range(0, len(data), 2):
        w = myord(data[i]) + (myord(data[i + 1]) << 8)
        s = (s + w) & 0xffff
    return s
def parse(self, valstr):
    # type: (bytes) -> None
    '''
    Parse an El Torito Validation Entry out of a string.

    Parameters:
     valstr - The string to parse the El Torito Validation Entry out of.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Validation Entry already initialized')
    unpacked = struct.unpack_from(self.FMT, valstr, 0)
    (header_id, platform_id, reserved, id_string, checksum, key1, key2) = unpacked
    self.platform_id = platform_id
    self.id_string = id_string
    self.checksum = checksum
    if header_id != 1:
        raise pycdlibexception.PyCdlibInvalidISO('El Torito Validation entry header ID not 1')
    if self.platform_id not in (0, 1, 2):
        raise pycdlibexception.PyCdlibInvalidISO('El Torito Validation entry platform ID not valid')
    if key1 != 0x55:
        raise pycdlibexception.PyCdlibInvalidISO('El Torito Validation entry first keybyte not 0x55')
    if key2 != 0xaa:
        raise pycdlibexception.PyCdlibInvalidISO('El Torito Validation entry second keybyte not 0xaa')
    # With the basic fields validated, the checksum over the whole entry
    # must come out to zero.
    if self._checksum(valstr) != 0:
        raise pycdlibexception.PyCdlibInvalidISO('El Torito Validation entry checksum not correct')
    self._initialized = True
def new(self, platform_id):
    # type: (int) -> None
    '''
    Create a new El Torito Validation Entry.

    Parameters:
     platform_id - The platform ID to set for this validation entry.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Validation Entry already initialized')
    self.platform_id = platform_id
    self.id_string = b'\x00' * 24  # FIXME: let the user set this
    # The checksum field must be zero while the record is being summed; the
    # real checksum is then derived from that sum.
    self.checksum = 0
    raw_sum = self._checksum(self._record()) - 1
    self.checksum = utils.swab_16bit(raw_sum)
    self._initialized = True
def _record(self):
    # type: () -> bytes
    '''
    Generate a string representing this El Torito Validation Entry
    (internal helper).

    Parameters:
     None.
    Returns:
     String representing this El Torito Validation Entry.
    '''
    # Header ID is always 1; the entry ends with the 0x55/0xaa key bytes.
    fields = (1, self.platform_id, 0, self.id_string, self.checksum,
              0x55, 0xaa)
    return struct.pack(self.FMT, *fields)
def parse(self, valstr):
    # type: (bytes) -> None
    '''
    Parse an El Torito Entry out of a string.

    Parameters:
     valstr - The string to parse the El Torito Entry out of.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Entry already initialized')
    vals = struct.unpack_from(self.FMT, valstr, 0)
    self.boot_indicator = vals[0]
    self.boot_media_type = vals[1]
    self.load_segment = vals[2]
    self.system_type = vals[3]
    reserved = vals[4]
    self.sector_count = vals[5]
    self.load_rba = vals[6]
    self.selection_criteria_type = vals[7]
    self.selection_criteria = vals[8]
    if self.boot_indicator not in (0x88, 0x00):
        raise pycdlibexception.PyCdlibInvalidISO('Invalid El Torito initial entry boot indicator')
    if self.boot_media_type > 4:
        raise pycdlibexception.PyCdlibInvalidISO('Invalid El Torito boot media type')
    # FIXME: check that the system type matches the partition table
    if reserved != 0:
        raise pycdlibexception.PyCdlibInvalidISO('El Torito unused field must be 0')
    # According to the specification, the El Torito unused end field (bytes
    # 0xc - 0x1f) should be all zero, but ISOs exist in the wild where it is
    # not, so that check is deliberately skipped.
    self._initialized = True
def new(self, sector_count, load_seg, media_name, system_type, bootable):
    # type: (int, int, str, int, bool) -> None
    '''
    Create a new El Torito Entry.

    Parameters:
     sector_count - The number of sectors to assign to this El Torito Entry.
     load_seg - The load segment address of the boot image.
     media_name - The name of the media type, one of 'noemul', 'floppy', or 'hdemul'.
     system_type - The partition type to assign to the entry.
     bootable - Whether this entry is bootable.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Entry already initialized')
    if media_name == 'noemul':
        media_type = self.MEDIA_NO_EMUL
    elif media_name == 'floppy':
        by_sectors = {
            2400: self.MEDIA_12FLOPPY,
            2880: self.MEDIA_144FLOPPY,
            5760: self.MEDIA_288FLOPPY,
        }
        if sector_count not in by_sectors:
            raise pycdlibexception.PyCdlibInvalidInput('Invalid sector count for floppy media type; must be 2400, 2880, or 5760')
        media_type = by_sectors[sector_count]
        # With floppy booting, the sector_count always ends up being 1
        sector_count = 1
    elif media_name == 'hdemul':
        media_type = self.MEDIA_HD_EMUL
        # With HD emul booting, the sector_count always ends up being 1
        sector_count = 1
    else:
        raise pycdlibexception.PyCdlibInvalidInput("Invalid media name '%s'" % (media_name))
    self.boot_indicator = 0x88 if bootable else 0
    self.boot_media_type = media_type
    self.load_segment = load_seg
    self.system_type = system_type
    self.sector_count = sector_count
    self.load_rba = 0  # This will get set later
    self.selection_criteria_type = 0  # FIXME: allow the user to set this
    self.selection_criteria = b'\x00' * 19
    self._initialized = True
def set_data_location(self, current_extent, tag_location):  # pylint: disable=unused-argument
    # type: (int, int) -> None
    '''
    Update the extent (and RBA) for this entry.

    Parameters:
     current_extent - The new extent to set for this entry.
     tag_location - Unused; present for interface compatibility.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Entry not yet initialized')
    self.load_rba = current_extent
def set_inode(self, ino):
    # type: (inode.Inode) -> None
    '''
    Set the Inode associated with this El Torito Entry.

    Parameters:
     ino - The Inode object corresponding to this entry.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Entry not yet initialized')
    self.inode = ino
def record(self):
    # type: () -> bytes
    '''
    Generate a string representing this El Torito Entry.

    Parameters:
     None.
    Returns:
     String representing this El Torito Entry.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Entry not yet initialized')
    # The fifth field (unused byte) is always recorded as zero.
    fields = (self.boot_indicator, self.boot_media_type, self.load_segment,
              self.system_type, 0, self.sector_count, self.load_rba,
              self.selection_criteria_type, self.selection_criteria)
    return struct.pack(self.FMT, *fields)
def set_data_length(self, length):
    # type: (int) -> None
    '''
    Set the length of data for this El Torito Entry.

    Parameters:
     length - The new length for the El Torito Entry.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Entry not initialized')
    # El Torito counts in 512-byte virtual sectors, rounded up.
    self.sector_count = utils.ceiling_div(length, 512)
def parse(self, valstr):
    # type: (bytes) -> None
    '''
    Parse an El Torito section header from a string.

    Parameters:
     valstr - The string to parse.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Section Header already initialized')
    vals = struct.unpack_from(self.FMT, valstr, 0)
    self.header_indicator = vals[0]
    self.platform_id = vals[1]
    self.num_section_entries = vals[2]
    self.id_string = vals[3]
    self._initialized = True
def new(self, id_string, platform_id):
    # type: (bytes, int) -> None
    '''
    Create a new El Torito section header.

    Parameters:
     id_string - The ID to use for this section header.
     platform_id - The platform ID for this section header.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Section Header already initialized')
    self.id_string = id_string
    self.platform_id = platform_id
    self.num_section_entries = 0
    # 0x91 marks the final section header; if another section is added
    # later, set_record_not_last flips this to 0x90.
    self.header_indicator = 0x91
    self._initialized = True
def add_parsed_entry(self, entry):
    # type: (EltoritoEntry) -> None
    '''
    Add a parsed entry to the list of entries of this header.  If the
    number of parsed entries exceeds what was expected from the initial
    parsing of the header, this method will throw an Exception.

    Parameters:
     entry - The EltoritoEntry object to add to the list of entries.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Section Header not yet initialized')
    # Parsing must never attach more entries than the header declared.
    if len(self.section_entries) >= self.num_section_entries:
        raise pycdlibexception.PyCdlibInvalidInput('Eltorito section had more entries than expected by section header; ISO is corrupt')
    self.section_entries.append(entry)
def add_new_entry(self, entry):
    # type: (EltoritoEntry) -> None
    '''
    Add a completely new entry to the list of entries of this header.

    Parameters:
     entry - The new EltoritoEntry object to add to the list of entries.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Section Header not yet initialized')
    # A brand-new entry also bumps the declared entry count.
    self.section_entries.append(entry)
    self.num_section_entries += 1
def record(self):
    # type: () -> bytes
    '''
    Get a string representing this El Torito section header.

    Parameters:
     None.
    Returns:
     A string representing this El Torito section header.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Section Header not yet initialized')
    # The header record is followed immediately by each of its entries.
    header = struct.pack(self.FMT, self.header_indicator, self.platform_id,
                         self.num_section_entries, self.id_string)
    return header + b''.join(entry.record() for entry in self.section_entries)
def parse(self, valstr):
    # type: (bytes) -> bool
    '''
    A method to parse an El Torito Boot Catalog out of a string.  This is
    called once per 32-byte catalog record; internal state tracks which
    record is expected next (validation entry, then initial entry, then
    section headers/entries until a zero record).

    Parameters:
     valstr - The string to parse the El Torito Boot Catalog out of.
    Returns:
     True once the final (zero) record has been seen and the catalog is
     fully parsed, False while more records are still expected.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Boot Catalog already initialized')
    if self.state == self.EXPECTING_VALIDATION_ENTRY:
        # The first entry in an El Torito boot catalog is the Validation
        # Entry. A Validation entry consists of 32 bytes (described in
        # detail in the parse_eltorito_validation_entry() method).
        self.validation_entry.parse(valstr)
        self.state = self.EXPECTING_INITIAL_ENTRY
    elif self.state == self.EXPECTING_INITIAL_ENTRY:
        # The next entry is the Initial/Default entry. An Initial/Default
        # entry consists of 32 bytes (described in detail in the
        # parse_eltorito_initial_entry() method).
        self.initial_entry.parse(valstr)
        self.state = self.EXPECTING_SECTION_HEADER_OR_DONE
    else:
        # The first byte of the record discriminates its type.  The
        # bytes(bytearray(...)) dance keeps this working on both Python 2
        # (where valstr[0] is a str) and Python 3 (where it is an int).
        val = bytes(bytearray([valstr[0]]))
        if val == b'\x00':
            # An empty entry tells us we are done parsing El Torito. Do
            # some sanity checks.
            last_section_index = len(self.sections) - 1
            for index, sec in enumerate(self.sections):
                if sec.num_section_entries != len(sec.section_entries):
                    raise pycdlibexception.PyCdlibInvalidISO('El Torito section header specified %d entries, only saw %d' % (sec.num_section_entries, len(sec.section_entries)))
                if index != last_section_index:
                    if sec.header_indicator != 0x90:
                        raise pycdlibexception.PyCdlibInvalidISO('Intermediate El Torito section header not properly specified')
                # In theory, we should also make sure that the very last
                # section has a header_indicator of 0x91. However, we
                # have seen ISOs in the wild (FreeBSD 11.0 amd64) in which
                # this is not the case, so we skip that check.
            self._initialized = True
        elif val in (b'\x90', b'\x91'):
            # A Section Header Entry (0x90 = more headers follow,
            # 0x91 = final header).
            section_header = EltoritoSectionHeader()
            section_header.parse(valstr)
            self.sections.append(section_header)
        elif val in (b'\x88', b'\x00'):
            # A Section Entry. According to El Torito 2.4, a Section Entry
            # must follow a Section Header, but we have seen ISOs in the
            # wild that do not follow this (Mageia 4 ISOs, for instance).
            # To deal with this, we get a little complicated here. If there
            # is a previous section header, and the length of the entries
            # attached to it is less than the number of entries it should
            # have, then we attach this entry to that header. If there is
            # no previous section header, or if the previous section header
            # is already 'full', then we make this a standalone entry.
            secentry = EltoritoEntry()
            secentry.parse(valstr)
            if self.sections and len(self.sections[-1].section_entries) < self.sections[-1].num_section_entries:
                self.sections[-1].add_parsed_entry(secentry)
            else:
                self.standalone_entries.append(secentry)
        elif val == b'\x44':
            # A Section Entry Extension: bytes 2+ extend the previous
            # entry's selection criteria.
            self.sections[-1].section_entries[-1].selection_criteria += valstr[2:]
        else:
            raise pycdlibexception.PyCdlibInvalidISO('Invalid El Torito Boot Catalog entry')
    return self._initialized
def new(self, br, ino, sector_count, load_seg, media_name, system_type,
        platform_id, bootable):
    # type: (headervd.BootRecord, inode.Inode, int, int, str, int, int, bool) -> None
    '''
    Create a new El Torito Boot Catalog.

    Parameters:
     br - The boot record that this El Torito Boot Catalog is associated with.
     ino - The Inode to associate with the initial entry.
     sector_count - The number of sectors for the initial entry.
     load_seg - The load segment address of the boot image.
     media_name - The name of the media type, one of 'noemul', 'floppy', or 'hdemul'.
     system_type - The partition type the entry should be.
     platform_id - The platform id to set in the validation entry.
     bootable - Whether this entry should be bootable.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Boot Catalog already initialized')
    self.br = br
    # Build the validation entry, then the initial/default entry, and tie
    # the initial entry to the boot file's inode so they stay in sync.
    self.validation_entry.new(platform_id)
    self.initial_entry.new(sector_count, load_seg, media_name, system_type,
                           bootable)
    self.initial_entry.set_inode(ino)
    ino.linked_records.append(self.initial_entry)
    self._initialized = True
def add_section(self, ino, sector_count, load_seg, media_name, system_type,
                efi, bootable):
    # type: (inode.Inode, int, int, str, int, bool, bool) -> None
    '''
    Add a section header and entry to this Boot Catalog.

    Parameters:
     ino - The Inode object to associate with the new Entry.
     sector_count - The number of sectors to assign to the new Entry.
     load_seg - The load segment address of the boot image.
     media_name - The name of the media type, one of 'noemul', 'floppy', or 'hdemul'.
     system_type - The type of partition this entry should be.
     efi - Whether this section is an EFI section.
     bootable - Whether this entry should be bootable.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Boot Catalog not yet initialized')
    # The boot catalog is a single 2048-byte extent.  The Validation Entry
    # plus the Initial Entry consume the first 64 bytes, leaving 1984.
    # Every section needs 32 bytes for its Section Header and 32 for its
    # Section Entry, so at most 1984 / 64 = 31 sections fit.
    if len(self.sections) == 31:
        raise pycdlibexception.PyCdlibInvalidInput('Too many Eltorito sections')
    platform_id = 0xef if efi else self.validation_entry.platform_id
    new_header = EltoritoSectionHeader()
    new_header.new(b'\x00' * 28, platform_id)
    new_entry = EltoritoEntry()
    new_entry.new(sector_count, load_seg, media_name, system_type, bootable)
    new_entry.set_inode(ino)
    ino.linked_records.append(new_entry)
    new_header.add_new_entry(new_entry)
    # Any previously-last section header must stop claiming to be last.
    if self.sections:
        self.sections[-1].set_record_not_last()
    self.sections.append(new_header)
def record(self):
    # type: () -> bytes
    '''
    Generate a string representing this El Torito Boot Catalog.

    Parameters:
     None.
    Returns:
     A string representing this El Torito Boot Catalog.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Boot Catalog not yet initialized')
    # Catalog layout: validation entry, initial entry, all sections (each
    # renders its own entries), then any standalone entries.
    pieces = [self.validation_entry.record(), self.initial_entry.record()]
    pieces.extend(sec.record() for sec in self.sections)
    pieces.extend(entry.record() for entry in self.standalone_entries)
    return b''.join(pieces)
def add_dirrecord(self, rec):
    # type: (Union[dr.DirectoryRecord, udfmod.UDFFileEntry]) -> None
    '''
    Add a Directory Record to those associated with this Boot Catalog.

    Parameters:
     rec - The DirectoryRecord object to associate with this Boot Catalog.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Boot Catalog not yet initialized')
    self.dirrecords.append(rec)
def update_catalog_extent(self, current_extent):
    # type: (int) -> None
    '''
    Update the extent associated with this Boot Catalog.

    Parameters:
     current_extent - New extent to associate with this Boot Catalog
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Boot Catalog not yet initialized')
    # The boot record's system use area stores the catalog extent as a
    # little-endian 32-bit value.
    packed = struct.pack('=L', current_extent)
    self.br.update_boot_system_use(packed)
def _check_d1_characters(name):
    # type: (bytes) -> None
    '''
    Check that a name only uses d1 characters as defined by ISO9660.

    Parameters:
     name - The name to check.
    Returns:
     Nothing.
    '''
    # bytearray iteration yields ints on both Python 2 and Python 3.
    for byte in bytearray(name):
        if byte not in _allowed_d1_characters:
            raise pycdlibexception.PyCdlibInvalidInput('ISO9660 filenames must consist of characters A-Z, 0-9, and _')
def _split_iso9660_filename(fullname):
    # type: (bytes) -> Tuple[bytes, bytes, bytes]
    '''
    Split an ISO 9660 filename into its constituent parts: the name, the
    extension, and the version number.

    Parameters:
     fullname - The name to split.
    Returns:
     A tuple containing the name, extension, and version.
    '''
    # The version is whatever follows the *last* semicolon (if any).
    rest, semi, version = fullname.rpartition(b';')
    if not semi:
        rest, version = fullname, b''
    # The extension is whatever follows the *last* dot (if any).
    name, dot, extension = rest.rpartition(b'.')
    if not dot:
        name, extension = rest, b''
    return (name, extension, version)
def _check_iso9660_filename(fullname, interchange_level):
    # type: (bytes, int) -> None
    '''
    Check that a file identifier conforms to the ISO9660 rules for a
    particular interchange level.

    Parameters:
     fullname - The name to check.
     interchange_level - The interchange level to check against.
    Returns:
     Nothing.
    '''
    # Ecma-119 7.5 governs what a valid file identifier looks like.
    (name, extension, version) = _split_iso9660_filename(fullname)
    # Ecma-119 requires a trailing semicolon-number, but discs exist in the
    # wild without the separator (Ubuntu 14.04 Desktop i386) or without any
    # version at all (FreeBSD 10.1 amd64), so both forms are tolerated; a
    # version, when present, must be in range.
    if version != b'' and (int(version) < 1 or int(version) > 32767):
        raise pycdlibexception.PyCdlibInvalidInput('ISO9660 filenames must have a version between 1 and 32767')
    # Ecma-119 section 7.5.1: at least one character in the name or the
    # extension.
    if not name and not extension:
        raise pycdlibexception.PyCdlibInvalidInput('ISO9660 filenames must have a non-empty name or extension')
    if b';' in name or b';' in extension:
        raise pycdlibexception.PyCdlibInvalidInput('ISO9660 filenames must contain exactly one semicolon')
    if interchange_level == 1:
        # Ecma-119 section 10.1: at level 1 the name is limited to 8
        # d-characters/d1-characters and the extension to 3.
        if len(name) > 8 or len(extension) > 3:
            raise pycdlibexception.PyCdlibInvalidInput('ISO9660 filenames at interchange level 1 cannot have more than 8 characters or 3 characters in the extension')
    # For other levels Ecma-119 7.5.2 bounds the total length, but discs in
    # the wild (Ubuntu 14.04 Desktop i386) exceed it, so no length check is
    # done here.
    if interchange_level < 4:
        # d-characters/d1-characters are not defined in Ecma-119 itself;
        # http://wiki.osdev.org/ISO_9660 suggests A-Z, 0-9, _, which matches
        # empirical evidence.
        _check_d1_characters(name)
        _check_d1_characters(extension)
def _check_iso9660_directory(fullname, interchange_level):
    # type: (bytes, int) -> None
    '''
    Check that a directory identifier conforms to the ISO9660 rules for a
    particular interchange level.

    Parameters:
     fullname - The name to check.
     interchange_level - The interchange level to check against.
    Returns:
     Nothing.
    '''
    # Ecma-119 section 7.6.1: a directory identifier needs at least one
    # character.
    if not fullname:
        raise pycdlibexception.PyCdlibInvalidInput('ISO9660 directory names must be at least 1 character long')
    if interchange_level == 1:
        # Ecma-119 section 10.1: identifiers cannot exceed 8 characters at
        # interchange level 1.
        maxlen = 8
    elif interchange_level in (2, 3):
        # Ecma-119 section 7.6.3: identifiers cannot exceed 207 characters.
        maxlen = 207
    else:
        # Interchange level 4: any length is allowed.
        maxlen = float('inf')
    if len(fullname) > maxlen:
        raise pycdlibexception.PyCdlibInvalidInput('ISO9660 directory names at interchange level %d cannot exceed %d characters' % (interchange_level, maxlen))
    # d-characters/d1-characters are not defined in Ecma-119 itself;
    # http://wiki.osdev.org/ISO_9660 suggests A-Z, 0-9, _, which matches
    # empirical evidence.
    if interchange_level < 4:
        _check_d1_characters(fullname)
def _interchange_level_from_filename(fullname):
    # type: (bytes) -> int
    '''
    Determine the ISO interchange level from a filename.  In theory there
    are 3 levels, but in practice only level 1 and level 3 are used.

    Parameters:
     fullname - The name to use to determine the interchange level.
    Returns:
     The interchange level determined from this filename.
    '''
    (name, extension, version) = _split_iso9660_filename(fullname)
    # Anything that violates a level-1 constraint bumps us to level 3.
    level = 1
    if version != b'' and (int(version) < 1 or int(version) > 32767):
        level = 3
    if b';' in name or b';' in extension:
        level = 3
    if len(name) > 8 or len(extension) > 3:
        level = 3
    try:
        _check_d1_characters(name)
        _check_d1_characters(extension)
    except pycdlibexception.PyCdlibInvalidInput:
        level = 3
    return level
def _interchange_level_from_directory(name):
    # type: (bytes) -> int
    '''
    Determine the ISO interchange level from a directory name.  In theory
    there are 3 levels, but in practice only level 1 and level 3 are used.

    Parameters:
     name - The name to use to determine the interchange level.
    Returns:
     The interchange level determined from this filename.
    '''
    # Names longer than 8 characters exceed the level-1 limit.
    level = 3 if len(name) > 8 else 1
    try:
        _check_d1_characters(name)
    except pycdlibexception.PyCdlibInvalidInput:
        # Characters outside the d1 set also force level 3.
        level = 3
    return level
def _reassign_vd_dirrecord_extents(vd, current_extent):
    # type: (headervd.PrimaryOrSupplementaryVD, int) -> Tuple[int, List[inode.Inode]]
    '''
    An internal helper method for reassign_extents that assigns extents to
    directory records for the passed in Volume Descriptor.  The current
    extent is passed in, and this function returns the extent after the
    last one it assigned.

    Parameters:
     vd - The volume descriptor on which to operate.
     current_extent - The current extent before assigning extents to the
                      volume descriptor directory records.
    Returns:
     A tuple of the current extent after assigning extents to the volume
     descriptor directory records, and the list of Inodes belonging to the
     files encountered along the way.
    '''
    log_block_size = vd.logical_block_size()

    # Here we re-walk the entire tree, re-assigning extents as necessary.
    root_dir_record = vd.root_directory_record()
    root_dir_record.set_data_location(current_extent, 0)
    current_extent += utils.ceiling_div(root_dir_record.data_length, log_block_size)

    # Walk through the list, assigning extents to all of the directories.
    child_link_recs = []  # type: List[dr.DirectoryRecord]
    parent_link_recs = []  # type: List[dr.DirectoryRecord]
    file_list = []
    # Path Table Record index of the directory currently being processed;
    # each directory consumes exactly one index.
    ptr_index = 1
    dirs = collections.deque([root_dir_record])
    while dirs:
        dir_record = dirs.popleft()

        if dir_record.is_root:
            # The root directory record doesn't need an extent assigned,
            # so just add its children to the list and continue on
            for child in dir_record.children:
                if child.ptr is not None:
                    child.ptr.update_parent_directory_number(ptr_index)
            ptr_index += 1
            dirs.extend(dir_record.children)
            continue

        dir_record_parent = dir_record.parent

        if dir_record_parent is None:
            raise pycdlibexception.PyCdlibInternalError('Parent of record is empty, this should never happen')

        if dir_record.is_dot():
            # '.' points at the directory containing it.
            dir_record.set_data_location(dir_record_parent.extent_location(), 0)
            continue

        dir_record_rock_ridge = dir_record.rock_ridge

        if dir_record.is_dotdot():
            if dir_record_parent.is_root:
                # Special case of the root directory record.  In this case, we
                # set the dotdot extent location to the same as the root.
                dir_record.set_data_location(dir_record_parent.extent_location(), 0)
                continue

            if dir_record_parent.parent is None:
                raise pycdlibexception.PyCdlibInternalError('Grandparent of record is empty, this should never happen')
            # '..' points at the grandparent directory.
            dir_record.set_data_location(dir_record_parent.parent.extent_location(), 0)

            # Now that we've set the data location, move around the Rock Ridge
            # links if necessary.
            if dir_record_rock_ridge is not None:
                if dir_record_rock_ridge.parent_link is not None:
                    parent_link_recs.append(dir_record)

                if dir_record_parent.rock_ridge is not None:
                    if dir_record_parent.parent is not None:
                        if dir_record_parent.parent.is_root:
                            source_dr = dir_record_parent.parent.children[0]
                        else:
                            source_dr = dir_record_parent.parent

                        if source_dr is None or source_dr.rock_ridge is None:
                            raise pycdlibexception.PyCdlibInternalError('Expected directory record to have Rock Ridge')
                        dir_record_rock_ridge.copy_file_links(source_dr.rock_ridge)
            continue

        if dir_record.is_dir():
            dir_record.set_data_location(current_extent, current_extent)
            for child in dir_record.children:
                if child.ptr is not None:
                    child.ptr.update_parent_directory_number(ptr_index)
            ptr_index += 1
            # Rock Ridge child-link records are fake placeholders; they do
            # not consume real extents.
            if dir_record_rock_ridge is None or not dir_record_rock_ridge.child_link_record_exists():
                current_extent += utils.ceiling_div(dir_record.data_length, log_block_size)
            dirs.extend(dir_record.children)
        else:
            if dir_record.data_length == 0 or (dir_record_rock_ridge is not None and (dir_record_rock_ridge.child_link_record_exists() or dir_record_rock_ridge.is_symlink())):
                # If this is a child link record, the extent location really
                # doesn't matter, since it is fake.  We set it to zero.
                dir_record.set_data_location(0, 0)
            else:
                if dir_record.inode is not None:
                    file_list.append(dir_record.inode)

        if dir_record_rock_ridge is not None:
            if dir_record_rock_ridge.dr_entries.ce_record is not None and dir_record_rock_ridge.ce_block is not None:
                # A negative extent location marks a Continuation Entry
                # block that hasn't been placed yet; claim one extent here.
                if dir_record_rock_ridge.ce_block.extent_location() < 0:
                    dir_record_rock_ridge.ce_block.set_extent_location(current_extent)
                    current_extent += 1
                dir_record_rock_ridge.dr_entries.ce_record.update_extent(dir_record_rock_ridge.ce_block.extent_location())
            if dir_record_rock_ridge.cl_to_moved_dr is not None:
                child_link_recs.append(dir_record)

    # After we have reshuffled the extents, we need to update the rock ridge
    # links.
    for ch in child_link_recs:
        if ch.rock_ridge is not None:
            ch.rock_ridge.child_link_update_from_dirrecord()

    for p in parent_link_recs:
        if p.rock_ridge is not None:
            p.rock_ridge.parent_link_update_from_dirrecord()

    return current_extent, file_list
def _yield_children(rec):
    # type: (dr.DirectoryRecord) -> Generator
    '''
    An internal generator that yields the children of a Directory Record,
    de-duplicating multi-extent entries and following Rock Ridge
    relocations.

    Parameters:
     rec - The Directory Record to get all of the children from (must be a
           directory)
    Yields:
     Children of this Directory Record.
    Returns:
     Nothing.
    '''
    if not rec.is_dir():
        raise pycdlibexception.PyCdlibInvalidInput('Record is not a directory!')

    prev_ident = b''
    for entry in rec.children:
        # Very large files are recorded as several directory entries that
        # share the same name; yield only the first of such a run.
        ident = entry.file_identifier()
        if ident == prev_ident:
            continue
        prev_ident = ident

        rr = entry.rock_ridge
        if (rr is not None and rr.child_link_record_exists() and
                rr.cl_to_moved_dr is not None and
                rr.cl_to_moved_dr.parent is not None):
            # This is a relocated entry.  Follow the child link, then look
            # in the target's parent for the record with the matching Rock
            # Ridge name and yield that instead.  If no match is found,
            # fall back to yielding this entry anyway.
            target = entry
            for candidate in rr.cl_to_moved_dr.parent.children:
                if candidate.rock_ridge is not None and candidate.rock_ridge.name() == rr.name():
                    target = candidate
                    break
            yield target
        else:
            yield entry
def _assign_udf_desc_extents(descs, start_extent):
    # type: (PyCdlib._UDFDescriptors, int) -> None
    '''
    An internal function to assign a consecutive sequence of extents to the
    given set of UDF Descriptors, starting at the given extent.

    Parameters:
     descs - The PyCdlib._UDFDescriptors object to assign extents for.
     start_extent - The starting extent to assign from.
    Returns:
     Nothing.
    '''
    # Each descriptor occupies exactly one extent; they are laid out in
    # this fixed order.
    ordered = (descs.pvd, descs.impl_use, descs.partition,
               descs.logical_volume, descs.unallocated_space,
               descs.terminator)
    for delta, desc in enumerate(ordered):
        desc.set_extent_location(start_extent + delta)
def _find_dr_record_by_name(vd, path, encoding):
    # type: (headervd.PrimaryOrSupplementaryVD, bytes, str) -> dr.DirectoryRecord
    '''
    An internal function to find a directory record on the ISO given an ISO
    or Joliet path.  If the entry is found, it returns the directory record
    object corresponding to that entry.  If the entry could not be found,
    a pycdlibexception.PyCdlibInvalidInput exception is raised.

    Parameters:
     vd - The Volume Descriptor to look in for the Directory Record.
     path - The ISO or Joliet entry to find the Directory Record for.
     encoding - The string encoding used for the path.
    Returns:
     The directory record entry representing the entry on the ISO.
    '''
    if not utils.starts_with_slash(path):
        raise pycdlibexception.PyCdlibInvalidInput('Must be a path starting with /')

    root_dir_record = vd.root_directory_record()

    # If the path is just the slash, we just want the root directory, so
    # get the child there and quit.
    if path == b'/':
        return root_dir_record

    # Split the path along the slashes
    splitpath = utils.split_path(path)

    # Each component is decoded as UTF-8 (validating it) and re-encoded in
    # the volume's encoding for comparison against on-disk identifiers.
    currpath = splitpath.pop(0).decode('utf-8').encode(encoding)

    entry = root_dir_record
    # Scratch record reused for comparisons in the binary search below.
    tmpdr = dr.DirectoryRecord()
    while True:
        child = None

        thelist = entry.children
        # Binary search for currpath among the (sorted) children.  The
        # search starts at index 2 to skip the '.' and '..' entries, which
        # always sort first in a directory.
        lo = 2
        hi = len(thelist)
        while lo < hi:
            mid = (lo + hi) // 2

            tmpdr.file_ident = currpath
            if thelist[mid] < tmpdr:
                lo = mid + 1
            else:
                hi = mid
        index = lo
        if index != len(thelist) and thelist[index].file_ident == currpath:
            child = thelist[index]
        if child is None:
            # We failed to find this component of the path, so break out of the
            # loop and fail
            break

        if child.rock_ridge is not None and child.rock_ridge.child_link_record_exists():
            # Here, the rock ridge extension has a child link, so we
            # need to follow it.
            child = child.rock_ridge.cl_to_moved_dr
            if child is None:
                break

        # We found the child, and it is the last one we are looking for;
        # return it.
        if not splitpath:
            return child

        # An intermediate component that isn't a directory means the path
        # cannot resolve; fail.
        if not child.is_dir():
            break
        entry = child
        currpath = splitpath.pop(0).decode('utf-8').encode(encoding)

    raise pycdlibexception.PyCdlibInvalidInput('Could not find path')
def read(self, size=None):
    # type: (Optional[int]) -> bytes
    '''
    A method to read and return up to size bytes.

    Parameters:
     size - Optional parameter to read size number of bytes; if None or
            negative, all remaining bytes in the file will be read
    Returns:
     The number of bytes requested or the rest of the data left in the file,
     whichever is smaller.  If the file is at or past EOF, returns an empty
     bytestring.
    '''
    if not self._open:
        raise pycdlibexception.PyCdlibInvalidInput('I/O operation on closed file.')

    # At or past EOF there is nothing left to return.
    if self._offset >= self._length:
        return b''

    if size is None or size < 0:
        # No limit requested; delegate to readall.
        return self.readall()

    # Clamp the request so we never read past the end of this file's data.
    to_read = min(size, self._length - self._offset)
    buf = self._fp.read(to_read)
    self._offset += to_read
    return buf
def readall(self):
    # type: () -> bytes
    '''
    A method to read and return all remaining bytes in the file.

    Parameters:
     None.
    Returns:
     The rest of the data left in the file.  If the file is at or past EOF,
     returns an empty bytestring.
    '''
    if not self._open:
        raise pycdlibexception.PyCdlibInvalidInput('I/O operation on closed file.')

    remaining = self._length - self._offset
    if remaining <= 0:
        # Nothing left; at or past EOF.
        return b''

    buf = self._fp.read(remaining)
    self._offset += remaining
    return buf
def seek(self, offset, whence=0):
    # type: (int, int) -> int
    '''
    A method to change the stream position to byte offset offset.  The
    offset is interpreted relative to the position indicated by whence.
    Valid values for whence are:
     * 0 -- start of stream (the default); offset should be zero or positive
     * 1 -- current stream position; offset may be negative
     * 2 -- end of stream; offset is usually negative

    Parameters:
     offset - The byte offset to seek to.
     whence - The position in the file to start from (0 for start, 1 for
              current, 2 for end)
    Returns:
     The new absolute position.
    '''
    if not self._open:
        raise pycdlibexception.PyCdlibInvalidInput('I/O operation on closed file.')

    if isinstance(offset, float):
        raise pycdlibexception.PyCdlibInvalidInput('an integer is required')

    # Compute and validate the new logical position for each whence mode.
    if whence == 0:
        # From beginning of file
        if offset < 0:
            raise pycdlibexception.PyCdlibInvalidInput('Invalid offset value (must be positive)')
        new_offset = offset
    elif whence == 1:
        # From current file position
        if self._offset + offset < 0:
            raise pycdlibexception.PyCdlibInvalidInput('Invalid offset value (cannot seek before start of file)')
        new_offset = self._offset + offset
    elif whence == 2:
        # From end of file
        if offset < 0 and abs(offset) > self._length:
            raise pycdlibexception.PyCdlibInvalidInput('Invalid offset value (cannot seek before start of file)')
        new_offset = self._length + offset
    else:
        raise pycdlibexception.PyCdlibInvalidInput('Invalid value for whence (options are 0, 1, and 2)')

    # Only move the underlying file object while the position is inside
    # this file's data.  The physical seek is always relative to
    # self._startpos, the start of this file's data within the larger ISO
    # (previously the whence == 2 branch forgot to add it).  The logical
    # offset is always updated, so seeking to or past EOF behaves like a
    # normal file object: the position moves and subsequent reads return
    # b''.
    if new_offset < self._length:
        self._fp.seek(self._startpos + new_offset, 0)
    self._offset = new_offset
    return self._offset
def _parse_volume_descriptors(self):
    # type: () -> None
    '''
    An internal method to parse the volume descriptors on an ISO.

    Populates self.pvds/self.svds/self.brs/self.vdsts and the UDF bridge
    structures from the descriptor sequence, and sets self.pvd to the first
    Primary Volume Descriptor found.

    Parameters:
     None.
    Returns:
     Nothing.
    '''
    # Ecma-119 says that the Volume Descriptor set is a sequence of volume
    # descriptors recorded in consecutively numbered Logical Sectors
    # starting with Logical Sector Number 16.  Since sectors are 2048 bytes
    # in length, we start at sector 16 * 2048
    # Ecma-119, 6.2.1 says that the Volume Space is divided into a System
    # Area and a Data Area, where the System Area is in logical sectors 0
    # to 15, and whose contents is not specified by the standard.
    self._cdfp.seek(16 * 2048)
    while True:
        # All volume descriptors are exactly 2048 bytes long
        curr_extent = self._cdfp.tell() // 2048
        vd = self._cdfp.read(2048)
        if len(vd) != 2048:
            raise pycdlibexception.PyCdlibInvalidISO('Failed to read entire volume descriptor')
        (desc_type, ident) = struct.unpack_from('=B5s', vd, 0)
        if desc_type not in (headervd.VOLUME_DESCRIPTOR_TYPE_PRIMARY,
                             headervd.VOLUME_DESCRIPTOR_TYPE_SET_TERMINATOR,
                             headervd.VOLUME_DESCRIPTOR_TYPE_BOOT_RECORD,
                             headervd.VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY) or ident not in (b'CD001', b'BEA01', b'NSR02', b'TEA01'):
            # We read the next extent, and it wasn't a descriptor.  Abort
            # the loop, remembering to back up the input file descriptor.
            self._cdfp.seek(-2048, os.SEEK_CUR)
            break
        if desc_type == headervd.VOLUME_DESCRIPTOR_TYPE_PRIMARY:
            pvd = headervd.PrimaryOrSupplementaryVD(headervd.VOLUME_DESCRIPTOR_TYPE_PRIMARY)
            pvd.parse(vd, curr_extent)
            self.pvds.append(pvd)
        elif desc_type == headervd.VOLUME_DESCRIPTOR_TYPE_SET_TERMINATOR:
            vdst = headervd.VolumeDescriptorSetTerminator()
            vdst.parse(vd, curr_extent)
            self.vdsts.append(vdst)
        elif desc_type == headervd.VOLUME_DESCRIPTOR_TYPE_BOOT_RECORD:
            # Both an Ecma-119 Boot Record and a Ecma-TR 071 UDF-Bridge
            # Beginning Extended Area Descriptor have the first byte as 0,
            # so we can't tell which it is until we look at the next 5
            # bytes (Boot Record will have 'CD001', BEAD will have 'BEA01').
            if ident == b'CD001':
                br = headervd.BootRecord()
                br.parse(vd, curr_extent)
                self.brs.append(br)
            elif ident == b'BEA01':
                self._has_udf = True
                self.udf_bea.parse(vd, curr_extent)
            elif ident == b'NSR02':
                self.udf_nsr.parse(vd, curr_extent)
            elif ident == b'TEA01':
                self.udf_tea.parse(vd, curr_extent)
            else:
                # This isn't really possible, since we would have aborted
                # the loop above.
                raise pycdlibexception.PyCdlibInvalidISO('Invalid volume identification type')
        elif desc_type == headervd.VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY:
            svd = headervd.PrimaryOrSupplementaryVD(headervd.VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY)
            svd.parse(vd, curr_extent)
            self.svds.append(svd)
        # Since we checked for the valid descriptors above, it is impossible
        # to see an invalid desc_type here, so no check necessary.

    # The language in Ecma-119, p.8, Section 6.7.1 says:
    #
    # The sequence shall contain one Primary Volume Descriptor (see 8.4) recorded at least once.
    #
    # The important bit there is "at least one", which means that we have
    # to accept ISOs with more than one PVD.
    if not self.pvds:
        raise pycdlibexception.PyCdlibInvalidISO('Valid ISO9660 filesystems must have at least one PVD')

    self.pvd = self.pvds[0]

    # Make sure any other PVDs agree with the first one.
    for pvd in self.pvds[1:]:
        if pvd != self.pvd:
            raise pycdlibexception.PyCdlibInvalidISO('Multiple occurrences of PVD did not agree!')

        # Share the first PVD's root so all copies walk the same tree.
        pvd.root_dir_record = self.pvd.root_dir_record

    if not self.vdsts:
        raise pycdlibexception.PyCdlibInvalidISO('Valid ISO9660 filesystems must have at least one Volume Descriptor Set Terminator')
def _seek_to_extent(self, extent):
    # type: (int) -> None
    '''
    An internal method to seek the input ISO file object to the start of
    the given extent.

    Parameters:
     extent - The extent to seek to.
    Returns:
     Nothing.
    '''
    block_size = self.pvd.logical_block_size()
    self._cdfp.seek(extent * block_size)
def _find_rr_record(self, rr_path):
    # type: (bytes) -> dr.DirectoryRecord
    '''
    An internal method to find a directory record on the ISO given a Rock
    Ridge path.  If the entry is found, it returns the directory record
    object corresponding to that entry.  If the entry could not be found, a
    pycdlibexception.PyCdlibInvalidInput is raised.

    Parameters:
     rr_path - The Rock Ridge path to lookup.
    Returns:
     The directory record entry representing the entry on the ISO.
    '''
    if not utils.starts_with_slash(rr_path):
        raise pycdlibexception.PyCdlibInvalidInput('Must be a path starting with /')

    root_dir_record = self.pvd.root_directory_record()

    # If the path is just the slash, we just want the root directory, so
    # get the child there and quit.
    if rr_path == b'/':
        return root_dir_record

    # Split the path along the slashes
    splitpath = utils.split_path(rr_path)

    currpath = splitpath.pop(0).decode('utf-8').encode('utf-8')

    entry = root_dir_record

    while True:
        child = None

        # Binary search for currpath among the (sorted) Rock Ridge
        # children of this directory.
        thelist = entry.rr_children
        lo = 0
        hi = len(thelist)
        while lo < hi:
            mid = (lo + hi) // 2
            tmpchild = thelist[mid]

            if tmpchild.rock_ridge is None:
                raise pycdlibexception.PyCdlibInvalidInput('Record without Rock Ridge entry on Rock Ridge ISO')

            if tmpchild.rock_ridge.name() < currpath:
                lo = mid + 1
            else:
                hi = mid
        index = lo
        # Only index into the list when the search stopped inside it.
        # Indexing unconditionally here used to raise an IndexError
        # (instead of the intended PyCdlibInvalidInput) whenever the
        # component was missing or the child list was empty.
        if index != len(thelist):
            tmpchild = thelist[index]
            if tmpchild.rock_ridge is not None and tmpchild.rock_ridge.name() == currpath:
                child = tmpchild

        if child is None:
            # We failed to find this component of the path, so break out of the
            # loop and fail
            break

        if child.rock_ridge is not None and child.rock_ridge.child_link_record_exists():
            # Here, the rock ridge extension has a child link, so we
            # need to follow it.
            child = child.rock_ridge.cl_to_moved_dr
            if child is None:
                break

        # We found the child, and it is the last one we are looking for;
        # return it.
        if not splitpath:
            return child

        if not child.is_dir():
            break
        entry = child
        currpath = splitpath.pop(0).decode('utf-8').encode('utf-8')

    raise pycdlibexception.PyCdlibInvalidInput('Could not find path')
def _find_joliet_record(self, joliet_path):
    # type: (bytes) -> dr.DirectoryRecord
    '''
    An internal method to find a directory record on the ISO given a Joliet
    path.  If the entry is found, it returns the directory record object
    corresponding to that entry.  If the entry could not be found, a
    pycdlibexception.PyCdlibInvalidInput is raised.

    Parameters:
     joliet_path - The Joliet path to lookup.
    Returns:
     The directory record entry representing the entry on the ISO.
    '''
    joliet_vd = self.joliet_vd
    if joliet_vd is None:
        raise pycdlibexception.PyCdlibInternalError('Joliet path requested on non-Joliet ISO')
    # Joliet identifiers are stored in big-endian UTF-16.
    return _find_dr_record_by_name(joliet_vd, joliet_path, 'utf-16_be')
def _find_udf_record(self, udf_path):
    # type: (bytes) -> Tuple[Optional[udfmod.UDFFileIdentifierDescriptor], udfmod.UDFFileEntry]
    '''
    An internal method to find a directory record on the ISO given a UDF
    path.  If the entry is found, it returns a tuple of the UDF File
    Identifier Descriptor and the UDF File Entry corresponding to that
    entry.  If the entry could not be found, a
    pycdlibexception.PyCdlibInvalidInput is raised.

    Parameters:
     udf_path - The UDF path to lookup.
    Returns:
     A tuple of the UDF File Identifier Descriptor (None for the root) and
     the UDF File Entry representing the entry on the ISO.
    '''
    # If the path is just the slash, we just want the root directory, so
    # get the child there and quit.  The root has no File Identifier
    # Descriptor of its own, hence the None.
    if udf_path == b'/':
        return None, self.udf_root  # type: ignore

    # Split the path along the slashes
    splitpath = utils.split_path(udf_path)

    currpath = splitpath.pop(0)

    entry = self.udf_root

    while entry is not None:
        child = entry.find_file_ident_desc_by_name(currpath)
        # NOTE(review): the code assumes find_file_ident_desc_by_name
        # raises when the name is not found; if it can return None, the
        # attribute accesses below would fail with an AttributeError
        # instead of the intended PyCdlibInvalidInput -- confirm against
        # the udfmod implementation.

        # We found the child, and it is the last one we are looking for;
        # return it.
        if not splitpath:
            return child, child.file_entry  # type: ignore

        if not child.is_dir():
            break
        entry = child.file_entry
        currpath = splitpath.pop(0)

    raise pycdlibexception.PyCdlibInvalidInput('Could not find path')
def _iso_name_and_parent_from_path(self, iso_path):
    # type: (bytes) -> Tuple[bytes, dr.DirectoryRecord]
    '''
    An internal method to find the parent directory record and name given
    an ISO path.  If the parent is found, return a tuple containing the
    basename of the path and the parent directory record object.

    Parameters:
     iso_path - The absolute ISO path to the entry on the ISO.
    Returns:
     A tuple containing just the name of the entry and a Directory Record
     object representing the parent of the entry.
    '''
    components = utils.split_path(iso_path)
    basename = components.pop()
    parent = self._find_iso_record(b'/' + b'/'.join(components))
    # The decode/encode round-trip validates that the name is proper UTF-8.
    return (basename.decode('utf-8').encode('utf-8'), parent)
def _joliet_name_and_parent_from_path(self, joliet_path):
    # type: (bytes) -> Tuple[bytes, dr.DirectoryRecord]
    '''
    An internal method to find the parent directory record and name given a
    Joliet path.  If the parent is found, return a tuple containing the
    basename of the path and the parent directory record object.

    Parameters:
     joliet_path - The absolute Joliet path to the entry on the ISO.
    Returns:
     A tuple containing just the name of the entry and a Directory Record
     object representing the parent of the entry.
    '''
    components = utils.split_path(joliet_path)
    basename = components.pop()
    # Joliet directory identifiers are limited to 64 characters.
    if len(basename) > 64:
        raise pycdlibexception.PyCdlibInvalidInput('Joliet names can be a maximum of 64 characters')
    parent = self._find_joliet_record(b'/' + b'/'.join(components))
    # Joliet identifiers are stored in big-endian UTF-16.
    return (basename.decode('utf-8').encode('utf-16_be'), parent)
def _udf_name_and_parent_from_path(self, udf_path):
    # type: (bytes) -> Tuple[bytes, udfmod.UDFFileEntry]
    '''
    An internal method to find the parent directory record and name given a
    UDF path.  If the parent is found, return a tuple containing the
    basename of the path and the parent UDF File Entry object.

    Parameters:
     udf_path - The absolute UDF path to the entry on the ISO.
    Returns:
     A tuple containing just the name of the entry and a UDF File Entry
     object representing the parent of the entry.
    '''
    components = utils.split_path(udf_path)
    basename = components.pop()
    # The File Identifier Descriptor of the parent is not needed here.
    (_, parent) = self._find_udf_record(b'/' + b'/'.join(components))
    return (basename.decode('utf-8').encode('utf-8'), parent)
def _set_rock_ridge(self, rr):
    # type: (str) -> None
    '''
    An internal method to record the Rock Ridge version of the ISO based on
    the version seen on the latest directory record.

    Parameters:
     rr - The version of rr from the last directory record.
    Returns:
     Nothing.
    '''
    # Mixed Rock Ridge versions are not allowed on a single ISO.  The
    # first version seen becomes the ISO-wide version; afterwards only
    # records of that same version, or of no version at all (as happens
    # with dotdot records), are accepted.
    if not self.rock_ridge:
        self.rock_ridge = rr  # type: str
    elif self.rock_ridge in ('1.09', '1.10', '1.12'):
        if rr and rr != self.rock_ridge:
            raise pycdlibexception.PyCdlibInvalidISO('Inconsistent Rock Ridge versions on the ISO!')
def _walk_directories(self, vd, extent_to_ptr, extent_to_inode, path_table_records):
    # type: (headervd.PrimaryOrSupplementaryVD, Dict[int, path_table_record.PathTableRecord], Dict[int, inode.Inode], List[path_table_record.PathTableRecord]) -> Tuple[int, int]
    '''
    An internal method to walk the directory records in a volume descriptor,
    starting with the root.  For each child in the directory record,
    we create a new dr.DirectoryRecord object and append it to the parent.

    Parameters:
     vd - The volume descriptor to walk.
     extent_to_ptr - A dictionary mapping extents to PTRs.
     extent_to_inode - A dictionary mapping extents to Inodes.
     path_table_records - The list of path table records.
    Returns:
     A tuple of the interchange level that this ISO conforms to and the
     offset of the last byte of file data seen.
    '''
    # Determine the total length of the underlying file so file extents
    # that run past the end can be detected and truncated below.
    cdfp = self._cdfp
    old_loc = cdfp.tell()
    cdfp.seek(0, os.SEEK_END)
    iso_file_length = cdfp.tell()
    cdfp.seek(old_loc)

    all_extent_to_dr = {}  # type: Dict[int, dr.DirectoryRecord]
    is_pvd = vd.is_pvd()
    root_dir_record = vd.root_directory_record()
    root_dir_record.set_ptr(path_table_records[0])
    interchange_level = 1
    block_size = vd.logical_block_size()
    parent_links = []
    child_links = []
    lastbyte = 0
    # Breadth-first walk of the directory tree.
    dirs = collections.deque([root_dir_record])
    while dirs:
        dir_record = dirs.popleft()

        self._seek_to_extent(dir_record.extent_location())
        length = dir_record.get_data_length()
        offset = 0
        last_record = None  # type: Optional[dr.DirectoryRecord]
        data = cdfp.read(length)
        while offset < length:
            if offset > (len(data) - 1):
                # The data we read off of the ISO was shorter than what we
                # expected.  The ISO is corrupt, throw an error.
                raise pycdlibexception.PyCdlibInvalidISO('Invalid directory record')
            lenbyte = bytearray([data[offset]])[0]
            if lenbyte == 0:
                # If we saw a zero length, this is probably the padding for
                # the end of this extent.  Move the offset to the start of
                # the next extent.
                padsize = block_size - (offset % block_size)
                if data[offset:offset + padsize] != b'\x00' * padsize:
                    # For now we are pedantic, and if the padding bytes
                    # are not all zero we throw an Exception.  Depending
                    # one what we see in the wild, we may have to loosen
                    # this check.
                    raise pycdlibexception.PyCdlibInvalidISO('Invalid padding on ISO')

                offset = offset + padsize
                continue

            new_record = dr.DirectoryRecord()
            rr = new_record.parse(vd, data[offset:offset + lenbyte],
                                  dir_record)
            offset += lenbyte

            # The parse method of dr.DirectoryRecord returns '' if this
            # record doesn't have Rock Ridge extensions, or the version of
            # the Rock Ridge extension (as detected for this directory record).
            self._set_rock_ridge(rr)

            # Cache some properties of this record for later use.
            is_symlink = new_record.rock_ridge is not None and new_record.rock_ridge.is_symlink()
            dots = new_record.is_dot() or new_record.is_dotdot()
            rr_cl = new_record.rock_ridge is not None and new_record.rock_ridge.child_link_record_exists()
            is_dir = new_record.is_dir()
            data_length = new_record.get_data_length()
            new_extent_loc = new_record.extent_location()

            if is_pvd and not dots and not rr_cl and not is_symlink and new_extent_loc not in all_extent_to_dr:
                all_extent_to_dr[new_extent_loc] = new_record

            # ISO generation programs sometimes use random extent locations
            # for zero-length files.  Thus, it is not valid for us to link
            # zero-length files to other files, as the linkage will be
            # essentially random.  Make sure we ignore zero-length files
            # (which includes symlinks) for linkage.  Similarly, we don't
            # do the lastbyte calculation on zero-length files for the same
            # reason.
            if not is_dir:
                len_to_use = data_length
                extent_to_use = new_extent_loc
                # An important side-effect of this is that zero-length files
                # or symlinks get an inode, but it is always set to length 0
                # and location 0 and not actually written out.  This is so
                # that we can 'link' everything through the Inode.
                if len_to_use == 0 or is_symlink:
                    len_to_use = 0
                    extent_to_use = 0

                # Directory Records that point to the El Torito Boot Catalog
                # do not get Inodes since all of that is handled in-memory.
                if self.eltorito_boot_catalog is not None and extent_to_use == self.eltorito_boot_catalog.extent_location():
                    self.eltorito_boot_catalog.add_dirrecord(new_record)
                else:
                    # For all real files, we create an inode that points to
                    # the location on disk.
                    if extent_to_use in extent_to_inode:
                        ino = extent_to_inode[extent_to_use]
                    else:
                        ino = inode.Inode()
                        ino.parse(extent_to_use, len_to_use, cdfp,
                                  block_size)
                        extent_to_inode[extent_to_use] = ino
                        self.inodes.append(ino)

                    ino.linked_records.append(new_record)
                    new_record.inode = ino

                new_end = extent_to_use * block_size + len_to_use
                if new_end > iso_file_length:
                    # In this case, the end of the file is beyond the size
                    # of the file.  Since this can't possibly work, truncate
                    # the file size.
                    if new_record.inode is not None:
                        new_record.inode.data_length = iso_file_length - extent_to_use * block_size
                        for rec in new_record.inode.linked_records:
                            rec.set_data_length(new_end)
                else:
                    # In this case, the new end is still within the file
                    # size, but the PVD size is wrong.  Set the lastbyte
                    # appropriately, which will eventually be used to fix
                    # the PVD size.
                    lastbyte = max(lastbyte, new_end)

            if new_record.rock_ridge is not None and new_record.rock_ridge.dr_entries.ce_record is not None:
                # Parse the Rock Ridge Continuation Entry block this record
                # points at, then restore the file position afterwards.
                ce_record = new_record.rock_ridge.dr_entries.ce_record
                orig_pos = cdfp.tell()
                self._seek_to_extent(ce_record.bl_cont_area)
                cdfp.seek(ce_record.offset_cont_area, os.SEEK_CUR)
                con_block = cdfp.read(ce_record.len_cont_area)
                new_record.rock_ridge.parse(con_block, False,
                                            new_record.rock_ridge.bytes_to_skip,
                                            True)
                cdfp.seek(orig_pos)
                block = self.pvd.track_rr_ce_entry(ce_record.bl_cont_area,
                                                   ce_record.offset_cont_area,
                                                   ce_record.len_cont_area)
                new_record.rock_ridge.update_ce_block(block)

            if rr_cl:
                child_links.append(new_record)

            if is_dir:
                if new_record.rock_ridge is not None and new_record.rock_ridge.relocated_record():
                    self._rr_moved_record = new_record

                if new_record.is_dotdot() and new_record.rock_ridge is not None and new_record.rock_ridge.parent_link_record_exists():
                    # If this is the dotdot record, and it has a parent
                    # link record, make sure to link up the parent link
                    # directory record.
                    parent_links.append(new_record)
                if not dots and not rr_cl:
                    dirs.append(new_record)
                    new_record.set_ptr(extent_to_ptr[new_extent_loc])

            if new_record.parent is None:
                raise pycdlibexception.PyCdlibInternalError('Trying to track child with no parent')
            try_long_entry = False
            try:
                new_record.parent.track_child(new_record, block_size)
            except pycdlibexception.PyCdlibInvalidInput:
                # dir_record.track_child() may throw a PyCdlibInvalidInput if it
                # saw a duplicate child.  However, we allow duplicate children
                # iff this record is a file and the last child has the same name;
                # this means we have a very long entry.  If that is not the case,
                # re-raise the error, otherwise pass through to try with the
                # allow_duplicates flag set to True.
                if new_record.is_dir() or last_record is None or last_record.file_identifier() != new_record.file_identifier():
                    raise
                else:
                    try_long_entry = True

            if try_long_entry:
                new_record.parent.track_child(new_record, block_size, True)

            if is_pvd:
                if new_record.is_dir():
                    new_level = _interchange_level_from_directory(new_record.file_identifier())
                else:
                    new_level = _interchange_level_from_filename(new_record.file_identifier())
                interchange_level = max(interchange_level, new_level)

            last_record = new_record

    # Now that the whole tree has been walked, resolve the Rock Ridge
    # parent and child links through the extent-to-record map built above.
    for pl in parent_links:
        if pl.rock_ridge is not None:
            pl.rock_ridge.parent_link = all_extent_to_dr[pl.rock_ridge.parent_link_extent()]

    for cl in child_links:
        if cl.rock_ridge is not None:
            cl.rock_ridge.cl_to_moved_dr = all_extent_to_dr[cl.rock_ridge.child_link_extent()]
            if cl.rock_ridge.cl_to_moved_dr.rock_ridge is not None:
                cl.rock_ridge.cl_to_moved_dr.rock_ridge.moved_to_cl_dr = cl

    return interchange_level, lastbyte
def _initialize(self):
    # type: () -> None
    '''
    An internal method to re-initialize the object.  Called from
    both __init__ and close.

    Parameters:
     None.
    Returns:
     Nothing.
    '''
    # Backing file object for the ISO; replaced when an ISO is opened.
    self._cdfp = BytesIO()
    # ISO9660 volume descriptor collections.
    self.svds = []  # type: List[headervd.PrimaryOrSupplementaryVD]
    self.brs = []  # type: List[headervd.BootRecord]
    self.vdsts = []  # type: List[headervd.VolumeDescriptorSetTerminator]
    self.eltorito_boot_catalog = None  # type: Optional[eltorito.EltoritoBootCatalog]
    self._initialized = False
    # Rock Ridge version in use on the ISO; '' means no Rock Ridge.
    self.rock_ridge = ''
    self.isohybrid_mbr = None  # type: Optional[isohybrid.IsoHybrid]
    self.xa = False
    # True when this object opened the file itself and must close it.
    self._managing_fp = False
    self.pvds = []  # type: List[headervd.PrimaryOrSupplementaryVD]
    # UDF bridge structures.
    self._has_udf = False
    self.udf_bea = udfmod.BEAVolumeStructure()  # type: udfmod.BEAVolumeStructure
    self.udf_nsr = udfmod.NSRVolumeStructure()  # type: udfmod.NSRVolumeStructure
    self.udf_tea = udfmod.TEAVolumeStructure()  # type: udfmod.TEAVolumeStructure
    self.udf_anchors = []  # type: List[udfmod.UDFAnchorVolumeStructure]
    self.udf_main_descs = self._UDFDescriptors()
    self.udf_reserve_descs = self._UDFDescriptors()
    self.udf_logical_volume_integrity = udfmod.UDFLogicalVolumeIntegrityDescriptor()
    self.udf_logical_volume_integrity_terminator = udfmod.UDFTerminatingDescriptor()
    self.udf_root = None  # type: Optional[udfmod.UDFFileEntry]
    self.udf_file_set = udfmod.UDFFileSetDescriptor()
    self.udf_file_set_terminator = udfmod.UDFTerminatingDescriptor()
    # Set when extents must be reassigned before writing the ISO back out.
    self._needs_reshuffle = False
    # Rock Ridge relocation bookkeeping.
    self._rr_moved_record = None  # type: ignore
    self._rr_moved_name = None  # type: Optional[bytes]
    self._rr_moved_rr_name = None  # type: Optional[bytes]
    self.enhanced_vd = None  # type: Optional[headervd.PrimaryOrSupplementaryVD]
    self.joliet_vd = None  # type: Optional[headervd.PrimaryOrSupplementaryVD]
    # These lookup methods are wrapped in a caching decorator (they expose
    # cache_clear); drop any entries from a previously opened ISO.
    self._find_iso_record.cache_clear()  # pylint: disable=no-member
    self._find_rr_record.cache_clear()  # pylint: disable=no-member
    self._find_joliet_record.cache_clear()  # pylint: disable=no-member
    self._find_udf_record.cache_clear()  # pylint: disable=no-member
    self._write_check_list = []  # type: List[PyCdlib._WriteRange]
    self.version_vd = None  # type: Optional[headervd.VersionVolumeDescriptor]
    self.inodes = []
def _parse_path_table(self, ptr_size, extent):
    # type: (int, int) -> Tuple[List[path_table_record.PathTableRecord], Dict[int, path_table_record.PathTableRecord]]
    '''
    An internal method to parse a path table on an ISO.  For each path
    table entry found, a Path Table Record object is created and added
    to the returned list and to a map keyed on its extent location.

    Parameters:
     ptr_size - The total size, in bytes, of the path table to read.
     extent - The extent at which this path table starts.
    Returns:
     A tuple consisting of the list of path table record entries and a
     dictionary mapping extent locations to the path table record entries.
    '''
    self._seek_to_extent(extent)
    data = self._cdfp.read(ptr_size)
    offset = 0
    out = []
    extent_to_ptr = {}
    while offset < ptr_size:
        ptr = path_table_record.PathTableRecord()

        # The first byte of a Path Table Record is the length of the
        # directory identifier; from it we can compute the (padded)
        # length of the full record.
        len_di_byte, = struct.unpack_from('B', data, offset)
        read_len = path_table_record.PathTableRecord.record_length(len_di_byte)

        ptr.parse(data[offset:offset + read_len])

        out.append(ptr)
        extent_to_ptr[ptr.extent_location] = ptr

        offset += read_len

    return out, extent_to_ptr
def _check_and_parse_eltorito(self, br):
    # type: (headervd.BootRecord) -> None
    '''
    An internal method to examine a Boot Record and see if it is an
    El Torito Boot Record.  If it is, parse the El Torito Boot Catalog,
    verification entry, initial entry, and any additional section entries.

    Parameters:
     br - The boot record to examine for an El Torito signature.
    Returns:
     Nothing.
    '''
    # Not an El Torito Boot Record at all; nothing to do.
    if br.boot_system_identifier != b'EL TORITO SPECIFICATION'.ljust(32, b'\x00'):
        return

    if self.eltorito_boot_catalog is not None:
        raise pycdlibexception.PyCdlibInvalidISO('Only one El Torito boot record is allowed')

    # According to the El Torito specification, section 2.0, the El
    # Torito boot record must be at extent 17.
    if br.extent_location() != 17:
        raise pycdlibexception.PyCdlibInvalidISO('El Torito Boot Record must be at extent 17')

    # Now that we have verified that the BootRecord is an El Torito one
    # and that it is sane, we go on to parse the El Torito Boot Catalog.
    # Note that the Boot Catalog is stored as a file in the ISO, though
    # we ignore that for the purposes of parsing.

    self.eltorito_boot_catalog = eltorito.EltoritoBootCatalog(br)
    # The first 4 bytes of the boot system use field hold the absolute
    # extent of the boot catalog (little-endian 32-bit).
    eltorito_boot_catalog_extent, = struct.unpack_from('=L', br.boot_system_use[:4], 0)

    # Seek to the catalog and feed it to the parser 32 bytes (one catalog
    # entry) at a time until parse() reports it is complete, restoring
    # the original file position afterwards.
    old = self._cdfp.tell()
    self._cdfp.seek(eltorito_boot_catalog_extent * self.pvd.logical_block_size())
    data = self._cdfp.read(32)
    while not self.eltorito_boot_catalog.parse(data):
        data = self._cdfp.read(32)
    self._cdfp.seek(old)
def _reshuffle_extents(self):
    # type: () -> None
    '''
    An internal method that is one of the keys of PyCdlib's ability to keep
    the in-memory metadata consistent at all times.  After making any
    changes to the ISO, most API calls end up calling this method.  This
    method will run through the entire ISO, assigning extents to each of
    the pieces of the ISO that exist.  This includes the Primary Volume
    Descriptor (which is fixed at extent 16), the Boot Records (including
    El Torito), the Supplementary Volume Descriptors (including Joliet),
    the Volume Descriptor Terminators, the Version Descriptor, the Primary
    Volume Descriptor Path Table Records (little and big endian), the
    Supplementary Volume Descriptor Path Table Records (little and big
    endian), the Primary Volume Descriptor directory records, the
    Supplementary Volume Descriptor directory records, the Rock Ridge ER
    sector, the El Torito Boot Catalog, the El Torito Initial Entry, and
    finally the data for the files.

    Parameters:
     None.
    Returns:
     Nothing.
    '''
    # Extents 0-15 are the System Area; the volume descriptor sequence
    # always starts at extent 16.
    current_extent = 16
    for pvd in self.pvds:
        pvd.set_extent_location(current_extent)
        current_extent += 1
    for br in self.brs:
        br.set_extent_location(current_extent)
        current_extent += 1
    for svd in self.svds:
        svd.set_extent_location(current_extent)
        current_extent += 1
    for vdst in self.vdsts:
        vdst.set_extent_location(current_extent)
        current_extent += 1
    if self._has_udf:
        # The UDF Bridge Volume Recognition Sequence (BEA, NSR, TEA)
        # follows the ISO9660 descriptors.
        self.udf_bea.set_extent_location(current_extent)
        current_extent += 1
        self.udf_nsr.set_extent_location(current_extent)
        current_extent += 1
        self.udf_tea.set_extent_location(current_extent)
        current_extent += 1
    if self.version_vd is not None:
        # Save off an extent for the version descriptor
        self.version_vd.set_extent_location(current_extent)
        current_extent += 1

    # part_start stays 0 for non-UDF ISOs; for UDF ISOs it is set below to
    # the extent the UDF partition starts at, so that partition-relative
    # locations can be computed as (extent - part_start).
    part_start = 0

    log_block_size = self.pvd.logical_block_size()

    udf_files = []  # type: List[inode.Inode]
    linked_inodes = {}  # type: Dict[int, bool]
    if self._has_udf:
        if current_extent > 32:
            # There is no *requirement* in the UDF specification that the
            # UDF Volume Descriptor Sequence starts at extent 32.  It can
            # start anywhere between extents 16 and 256, as long as the
            # ISO9660 volume descriptors, the UDF Bridge Volume Recognition
            # Sequence, Main Volume Descriptor Sequence, Reserve Volume
            # Descriptor Sequence, and Logical Volume Integrity Sequence all
            # all fit, in that order.  The only way that all of these volume
            # descriptors would not fit between extents 16 and 32 is in the
            # case of many duplicate PVDs, many VDSTs, or similar.  Since
            # that is unlikely, for now we maintain compatibility with
            # genisoimage and force the UDF Main Descriptor Sequence to
            # start at 32.  We can change this later if needed.
            raise pycdlibexception.PyCdlibInternalError('Too many ISO9660 volume descriptors to fit UDF')
        current_extent = 32

        _assign_udf_desc_extents(self.udf_main_descs, current_extent)

        # ECMA TR-071 2.6 says that the volume sequence will be exactly 16
        # extents long, and we know we started at 32, so make it exactly 48.
        current_extent = 48

        _assign_udf_desc_extents(self.udf_reserve_descs, current_extent)

        # ECMA TR-071 2.6 says that the volume sequence will be exactly 16
        # extents long, and we know we started at 48, so make it exactly 64.
        current_extent = 64

        # The integrity descriptor's location must be recorded in both the
        # main and reserve Logical Volume Descriptors.
        self.udf_logical_volume_integrity.set_extent_location(current_extent)
        self.udf_main_descs.logical_volume.set_integrity_location(current_extent)
        self.udf_reserve_descs.logical_volume.set_integrity_location(current_extent)
        current_extent += 1

        self.udf_logical_volume_integrity_terminator.set_extent_location(current_extent)
        current_extent += 1

        # Now assign the first UDF anchor at 256
        if len(self.udf_anchors) != 2:
            raise pycdlibexception.PyCdlibInternalError('Expected 2 UDF anchors')

        # We know that the first anchor is hard-coded at extent 256.  We
        # will have to assign the other one at the end, since it is the
        # last extent
        current_extent = 256
        self.udf_anchors[0].set_extent_location(current_extent,
                                                self.udf_main_descs.pvd.extent_location(),
                                                self.udf_reserve_descs.pvd.extent_location())
        current_extent += 1

        # Now assign the UDF File Set Descriptor to the beginning of the partition.
        part_start = current_extent
        self.udf_file_set.set_extent_location(part_start)
        self.udf_main_descs.partition.set_start_location(part_start)
        self.udf_reserve_descs.partition.set_start_location(part_start)
        current_extent += 1

        self.udf_file_set_terminator.set_extent_location(current_extent,
                                                         current_extent - part_start)
        current_extent += 1

        # Assignment of extents to UDF is somewhat complicated.  UDF
        # filesystems are laid out by having one extent containing a
        # File Entry that describes a directory or a file, followed by
        # an extent that contains the entries in the case of a directory.
        # All File Entries and entries containing File Identifier
        # descriptors are laid out ahead of File Entries for files.  The
        # implementation below alternates assignment to File Entries and
        # File Descriptors for all directories, and then assigns to all
        # files.  Note that data for files is assigned in the 'normal'
        # file assignment below.

        # First assign directories.
        if self.udf_root is None:
            raise pycdlibexception.PyCdlibInternalError('ISO has UDF but no UDF root; this should never happen')
        udf_file_assign_list = []
        # Breadth-first walk over the UDF directory tree, starting at the
        # root (which has no File Identifier Descriptor pointing to it).
        udf_file_entries = collections.deque([(self.udf_root, None)])  # type: Deque[Tuple[udfmod.UDFFileEntry, Optional[udfmod.UDFFileIdentifierDescriptor]]]
        while udf_file_entries:
            udf_file_entry, fi_desc = udf_file_entries.popleft()

            # In theory we should check for and skip the work for files and
            # symlinks, but they will never be added to 'udf_file_entries'
            # to begin with so we can safely ignore them.

            # Set the location that the File Entry lives at, and update
            # the File Identifier Descriptor that points to it (for all
            # but the root).
            udf_file_entry.set_extent_location(current_extent,
                                               current_extent - part_start)
            if fi_desc is not None:
                fi_desc.set_icb(current_extent, current_extent - part_start)
            current_extent += 1

            # Now assign where the File Entry points to; for files this
            # is overwritten later, but for directories this tells us where
            # to find the extent containing the list of File Identifier
            # Descriptors that are in this directory.
            udf_file_entry.set_data_location(current_extent, current_extent - part_start)
            offset = 0
            for d in udf_file_entry.fi_descs:
                if offset >= log_block_size:
                    # The offset has spilled over into a new extent.
                    # Increase the current extent by one, and update the
                    # offset.  Note that the offset does not go to 0, since
                    # UDF allows File Identifier Descs to span extents.
                    # Instead, it is the current offset minus the size of a
                    # block (say 2050 - 2048, leaving us at offset 2).
                    current_extent += 1
                    offset = offset - log_block_size
                d.set_extent_location(current_extent, current_extent - part_start)
                if not d.is_parent() and d.file_entry is not None:
                    if d.is_dir():
                        udf_file_entries.append((d.file_entry, d))
                    else:
                        udf_file_assign_list.append((d.file_entry, d))
                offset += udfmod.UDFFileIdentifierDescriptor.length(len(d.fi))

            # Account for a final partial spill past the last full block.
            if offset > log_block_size:
                current_extent += 1

            current_extent += 1

        # Now assign files (this includes symlinks).
        udf_file_entry_inodes_assigned = {}  # type: Dict[int, bool]
        for udf_file_assign_entry, fi_desc in udf_file_assign_list:
            if udf_file_assign_entry is None:
                continue

            # Hard links share an inode; only assign its extent once.
            if udf_file_assign_entry.inode is not None and id(udf_file_assign_entry.inode) in udf_file_entry_inodes_assigned:
                continue

            udf_file_assign_entry.set_extent_location(current_extent, current_extent - part_start)
            fi_desc.set_icb(current_extent, current_extent - part_start)

            if udf_file_assign_entry.inode is not None:
                # The data location for files will be set later.
                if udf_file_assign_entry.inode.get_data_length() > 0:
                    udf_files.append(udf_file_assign_entry.inode)
                for rec in udf_file_assign_entry.inode.linked_records:
                    if isinstance(rec, udfmod.UDFFileEntry):
                        rec.set_extent_location(current_extent, current_extent - part_start)
                        if rec.file_ident is not None:
                            rec.file_ident.set_icb(current_extent, current_extent - part_start)

            udf_file_entry_inodes_assigned[id(udf_file_assign_entry.inode)] = True
            current_extent += 1

        self.udf_logical_volume_integrity.logical_volume_contents_use.unique_id = current_extent

    # Next up, put the path table records in the right place.
    # All PVDs share one set of path tables: little-endian first, then
    # big-endian.
    for pvd in self.pvds:
        pvd.path_table_location_le = current_extent
    current_extent += self.pvd.path_table_num_extents
    for pvd in self.pvds:
        pvd.path_table_location_be = current_extent
    current_extent += self.pvd.path_table_num_extents

    if self.enhanced_vd is not None:
        # The enhanced VD shares the PVD's path tables.
        self.enhanced_vd.path_table_location_le = self.pvd.path_table_location_le
        self.enhanced_vd.path_table_location_be = self.pvd.path_table_location_be

    if self.joliet_vd is not None:
        self.joliet_vd.path_table_location_le = current_extent
        current_extent += self.joliet_vd.path_table_num_extents
        self.joliet_vd.path_table_location_be = current_extent
        current_extent += self.joliet_vd.path_table_num_extents

    self.pvd.clear_rr_ce_entries()
    current_extent, pvd_files = _reassign_vd_dirrecord_extents(self.pvd,
                                                               current_extent)

    joliet_files = []  # type: List[inode.Inode]
    if self.joliet_vd is not None:
        current_extent, joliet_files = _reassign_vd_dirrecord_extents(self.joliet_vd,
                                                                      current_extent)

    # The rock ridge 'ER' sector must be after all of the directory
    # entries but before the file contents.
    rr = self.pvd.root_directory_record().children[0].rock_ridge
    if rr is not None and rr.dr_entries.ce_record is not None:
        rr.dr_entries.ce_record.update_extent(current_extent)
        current_extent += 1

    def _set_inode(ino, current_extent, part_start):
        # type: (inode.Inode, int, int) -> None
        '''
        Internal method to set the location of an inode and update the
        metadata of all records attached to it.

        Parameters:
         ino - The inode to update.
         current_extent - The extent to set the inode to.
         part_start - The start of the partition that the inode is on.
        Returns:
         Nothing.
        '''
        ino.set_extent_location(current_extent)
        for rec in ino.linked_records:
            rec.set_data_location(current_extent, current_extent - part_start)

    if self.eltorito_boot_catalog is not None:
        # The boot catalog's data goes first, then each initial/section
        # entry's boot image.
        self.eltorito_boot_catalog.update_catalog_extent(current_extent)
        for rec in self.eltorito_boot_catalog.dirrecords:
            rec.set_data_location(current_extent, current_extent - part_start)
        current_extent += utils.ceiling_div(self.eltorito_boot_catalog.dirrecords[0].get_data_length(),
                                            log_block_size)

        entries_to_update = [self.eltorito_boot_catalog.initial_entry]
        for sec in self.eltorito_boot_catalog.sections:
            for entry in sec.section_entries:
                entries_to_update.append(entry)

        for entry in entries_to_update:
            if id(entry.inode) in linked_inodes:
                continue

            entry.set_data_location(current_extent, current_extent - part_start)
            if self.isohybrid_mbr is not None:
                self.isohybrid_mbr.update_rba(current_extent)

            _set_inode(entry.inode, current_extent, part_start)
            linked_inodes[id(entry.inode)] = True
            current_extent += utils.ceiling_div(entry.inode.get_data_length(),
                                                log_block_size)

    # Finally, assign extents to the file data itself.
    for ino in pvd_files + joliet_files + udf_files:
        if id(ino) in linked_inodes:
            # We've already assigned an extent because it was linked to an
            # earlier entry.
            continue

        _set_inode(ino, current_extent, part_start)
        linked_inodes[id(ino)] = True

        current_extent += utils.ceiling_div(ino.get_data_length(),
                                            log_block_size)

    if self.enhanced_vd is not None:
        loc = self.pvd.root_directory_record().extent_location()
        self.enhanced_vd.root_directory_record().set_data_location(loc, loc)

    if self.udf_anchors:
        # The second anchor lives in the very last extent of the ISO.
        self.udf_anchors[1].set_extent_location(current_extent,
                                                self.udf_main_descs.pvd.extent_location(),
                                                self.udf_reserve_descs.pvd.extent_location())

    if current_extent > self.pvd.space_size:
        raise pycdlibexception.PyCdlibInternalError('Assigned an extent beyond the ISO (%d > %d)' % (current_extent, self.pvd.space_size))

    self._needs_reshuffle = False
Please provide a description of the function:def _add_child_to_dr(self, child, logical_block_size):
# type: (dr.DirectoryRecord, int) -> int
'''
An internal method to add a child to a directory record, expanding the
space in the Volume Descriptor(s) if necessary.
Parameters:
child - The new child.
logical_block_size - The size of one logical block.
Returns:
The number of bytes to add for this directory record (this may be zero).
'''
if child.parent is None:
raise pycdlibexception.PyCdlibInternalError('Trying to add child without a parent')
try_long_entry = False
try:
ret = child.parent.add_child(child, logical_block_size)
except pycdlibexception.PyCdlibInvalidInput:
# dir_record.add_child() may throw a PyCdlibInvalidInput if
# it saw a duplicate child. However, we allow duplicate
# children iff the last child is the same; this means that
# we have a very long entry. If that is the case, try again
# with the allow_duplicates flag set to True.
if not child.is_dir():
try_long_entry = True
else:
raise
if try_long_entry:
ret = child.parent.add_child(child, logical_block_size, True)
# The add_child() method returns True if the parent needs another extent
# in order to fit the directory record for this child. Add another
# extent as appropriate here.
if ret:
return self.pvd.logical_block_size()
return 0 | [] |
Please provide a description of the function:def _remove_child_from_dr(self, child, index, logical_block_size):
# type: (dr.DirectoryRecord, int, int) -> int
'''
An internal method to remove a child from a directory record, shrinking
the space in the Volume Descriptor if necessary.
Parameters:
child - The child to remove.
index - The index of the child into the parent's child array.
logical_block_size - The size of one logical block.
Returns:
The number of bytes to remove for this directory record (this may be zero).
'''
if child.parent is None:
raise pycdlibexception.PyCdlibInternalError('Trying to remove child from non-existent parent')
self._find_iso_record.cache_clear() # pylint: disable=no-member
self._find_rr_record.cache_clear() # pylint: disable=no-member
self._find_joliet_record.cache_clear() # pylint: disable=no-member
# The remove_child() method returns True if the parent no longer needs
# the extent that the directory record for this child was on. Remove
# the extent as appropriate here.
if child.parent.remove_child(child, index, logical_block_size):
return self.pvd.logical_block_size()
return 0 | [] |
def _add_to_ptr_size(self, ptr):
    # type: (path_table_record.PathTableRecord) -> int
    '''
    An internal method to add a PTR to a VD, adding space to the VD if
    necessary.

    Parameters:
     ptr - The PTR to add to the vd.
    Returns:
     The number of additional bytes that are needed to fit the new PTR
     (this may be zero).
    '''
    record_len = path_table_record.PathTableRecord.record_length(ptr.len_di)
    extra_bytes = 0
    # Ask each PVD whether its path table has to grow to hold this
    # record; when it does, account for 4 additional extents (2 for the
    # little-endian table and 2 for the big-endian one).
    for pvd in self.pvds:
        if pvd.add_to_ptr_size(record_len):
            extra_bytes += 4 * self.pvd.logical_block_size()
    return extra_bytes
def _remove_from_ptr_size(self, ptr):
    # type: (path_table_record.PathTableRecord) -> int
    '''
    An internal method to remove a PTR from a VD, removing space from the
    VD if necessary.

    Parameters:
     ptr - The PTR to remove from the VD.
    Returns:
     The number of bytes to remove from the VDs (this may be zero).
    '''
    record_len = path_table_record.PathTableRecord.record_length(ptr.len_di)
    freed_bytes = 0
    # Ask each PVD whether its path tables can shrink once this record is
    # gone; when they can, give back 4 extents (2 little-endian, 2
    # big-endian).
    for pvd in self.pvds:
        if pvd.remove_from_ptr_size(record_len):
            freed_bytes += 4 * self.pvd.logical_block_size()
    return freed_bytes
def _find_or_create_rr_moved(self):
    # type: () -> int
    '''
    An internal method to find the /RR_MOVED directory on the ISO.  If it
    already exists, the directory record to it is returned.  If it doesn't
    yet exist, it is created and the directory record to it is returned.

    Parameters:
     None.
    Returns:
     The number of additional bytes needed for the rr_moved directory
     (this may be zero).
    '''
    if self._rr_moved_record is not None:
        # Already created on an earlier call; no additional space needed.
        return 0

    if self._rr_moved_name is None:
        self._rr_moved_name = b'RR_MOVED'
    if self._rr_moved_rr_name is None:
        self._rr_moved_rr_name = b'rr_moved'

    # No rr_moved found, so we have to create it.
    rec = dr.DirectoryRecord()
    rec.new_dir(self.pvd, self._rr_moved_name,
                self.pvd.root_directory_record(),
                self.pvd.sequence_number(), self.rock_ridge,
                self._rr_moved_rr_name, self.pvd.logical_block_size(),
                False, False, self.xa, 0o040555)
    num_bytes_to_add = self._add_child_to_dr(rec,
                                             self.pvd.logical_block_size())

    # Every new directory needs '.' and '..' entries.
    self._create_dot(self.pvd, rec, self.rock_ridge, self.xa, 0o040555)
    self._create_dotdot(self.pvd, rec, self.rock_ridge, False, self.xa,
                        0o040555)

    # We always need to add an entry to the path table record
    ptr = path_table_record.PathTableRecord()
    ptr.new_dir(self._rr_moved_name)

    # One new extent for the directory data itself, plus whatever extra
    # space the path tables need to hold the new record.
    num_bytes_to_add += self.pvd.logical_block_size() + self._add_to_ptr_size(ptr)

    rec.set_ptr(ptr)

    self._rr_moved_record = rec

    return num_bytes_to_add
def _calculate_eltorito_boot_info_table_csum(self, data_fp, data_len):
    # type: (BinaryIO, int) -> int
    '''
    An internal method to calculate the checksum for an El Torito Boot
    Info Table.  This checksum is a simple 32-bit checksum over all of
    the data in the boot file, starting right after the Boot Info Table
    itself.

    Parameters:
     data_fp - The file object to read the input data from, positioned
               at the start of the boot file.
     data_len - The length of the input file.
    Returns:
     An integer representing the 32-bit checksum for the boot info table.
    '''
    # Here we want to read the boot file so we can calculate the checksum
    # over it.
    log_block_size = self.pvd.logical_block_size()
    num_sectors = utils.ceiling_div(data_len, log_block_size)
    csum = 0
    for curr_sector in range(num_sectors):
        # NOTE(review): the pad length is hard-coded to 2048 while the
        # read size uses the logical block size; these agree on standard
        # 2048-byte-block ISOs -- confirm before supporting other sizes.
        block = data_fp.read(log_block_size).ljust(2048, b'\x00')
        # The first 64 bytes of the boot file hold the Boot Info Table
        # itself and are not included in the checksum.
        start = 64 if curr_sector == 0 else 0
        for i in range(start, len(block), 4):
            # Unpack little-endian 32-bit words directly at offset i;
            # no need to slice (and copy) the block first.
            word, = struct.unpack_from('=L', block, i)
            csum = (csum + word) & 0xffffffff
    return csum
def _check_for_eltorito_boot_info_table(self, ino):
    # type: (inode.Inode) -> None
    '''
    An internal method to check a boot directory record to see if it has
    an El Torito Boot Info Table embedded inside of it.

    Parameters:
     ino - The Inode to check for a Boot Info Table.
    Returns:
     Nothing.
    '''
    # Remember the current position in the ISO so it can be restored
    # once we are done poking at the boot file.
    orig = self._cdfp.tell()
    with inode.InodeOpenData(ino, self.pvd.logical_block_size()) as (data_fp, data_len):
        # The Boot Info Table candidate lives at offset 8 within the
        # boot file.
        data_fp.seek(8, os.SEEK_CUR)
        bi_table = eltorito.EltoritoBootInfoTable()
        if bi_table.parse(self.pvd, data_fp.read(eltorito.EltoritoBootInfoTable.header_length()), ino):
            # Seek back over the 8-byte offset plus the header we just
            # read (24 bytes total -- assumes header_length() is 16;
            # TODO confirm) so the checksum covers the whole boot file.
            data_fp.seek(-24, os.SEEK_CUR)
            # OK, the rest of the stuff checks out; do a final
            # check to make sure the checksum is reasonable.
            csum = self._calculate_eltorito_boot_info_table_csum(data_fp, data_len)

            if csum == bi_table.csum:
                ino.add_boot_info_table(bi_table)
    self._cdfp.seek(orig)
Please provide a description of the function:def _check_rr_name(self, rr_name):
# type: (Optional[str]) -> bytes
'''
An internal method to check whether this ISO requires or does not
require a Rock Ridge path.
Parameters:
rr_name - The Rock Ridge name.
Returns:
The Rock Ridge name in bytes if this is a Rock Ridge ISO, None otherwise.
'''
if self.rock_ridge:
if not rr_name:
raise pycdlibexception.PyCdlibInvalidInput('A rock ridge name must be passed for a rock-ridge ISO')
if rr_name.count('/') != 0:
raise pycdlibexception.PyCdlibInvalidInput('A rock ridge name must be relative')
return rr_name.encode('utf-8')
if rr_name:
raise pycdlibexception.PyCdlibInvalidInput('A rock ridge name can only be specified for a rock-ridge ISO')
return b'' | [] |
Please provide a description of the function:def _normalize_joliet_path(self, joliet_path):
# type: (str) -> bytes
'''
An internal method to check whether this ISO does or does not require
a Joliet path. If a Joliet path is required, the path is normalized
and returned.
Parameters:
joliet_path - The joliet_path to normalize (if necessary).
Returns:
The normalized joliet_path if this ISO has Joliet, None otherwise.
'''
tmp_path = b''
if self.joliet_vd is not None:
if not joliet_path:
raise pycdlibexception.PyCdlibInvalidInput('A Joliet path must be passed for a Joliet ISO')
tmp_path = utils.normpath(joliet_path)
else:
if joliet_path:
raise pycdlibexception.PyCdlibInvalidInput('A Joliet path can only be specified for a Joliet ISO')
return tmp_path | [] |
Please provide a description of the function:def _link_eltorito(self, extent_to_inode):
# type: (Dict[int, inode.Inode]) -> None
'''
An internal method to link the El Torito entries into their
corresponding Directory Records, creating new ones if they are
'hidden'. Should only be called on an El Torito ISO.
Parameters:
extent_to_inode - The map that maps extents to Inodes.
Returns:
Nothing.
'''
if self.eltorito_boot_catalog is None:
raise pycdlibexception.PyCdlibInternalError('Trying to link El Torito entries on a non-El Torito ISO')
log_block_size = self.pvd.logical_block_size()
entries_to_assign = [self.eltorito_boot_catalog.initial_entry]
for sec in self.eltorito_boot_catalog.sections:
for entry in sec.section_entries:
entries_to_assign.append(entry)
for entry in entries_to_assign:
entry_extent = entry.get_rba()
if entry_extent in extent_to_inode:
ino = extent_to_inode[entry_extent]
else:
ino = inode.Inode()
ino.parse(entry_extent, entry.length(), self._cdfp,
log_block_size)
extent_to_inode[entry_extent] = ino
self.inodes.append(ino)
ino.linked_records.append(entry)
entry.set_inode(ino) | [] |
def _parse_udf_vol_descs(self, extent, length, descs):
    # type: (int, int, PyCdlib._UDFDescriptors) -> None
    '''
    An internal method to parse a set of UDF Volume Descriptors.

    Parameters:
     extent - The extent at which to start parsing.
     length - The number of bytes to read from the incoming ISO.
     descs - The _UDFDescriptors object to store parsed objects into.
    Returns:
     Nothing.
    '''
    # Read the whole Volume Descriptor Sequence in one shot.
    self._seek_to_extent(extent)
    vd_data = self._cdfp.read(length)

    block_size = self.pvd.logical_block_size()

    # The descriptors may appear in any order and some may be absent, so
    # dispatch on the Descriptor Tag identifier (the first 16 bytes of
    # each descriptor) until a Terminating Descriptor (tag 8) shows up.
    descs_by_tag = {
        1: descs.pvd,
        4: descs.impl_use,
        5: descs.partition,
        6: descs.logical_volume,
        7: descs.unallocated_space,
    }

    offset = 0
    current_extent = extent
    while True:
        desc_tag = udfmod.UDFTag()
        desc_tag.parse(vd_data[offset:], current_extent)
        if desc_tag.tag_ident == 8:
            descs.terminator.parse(current_extent, desc_tag)
            break
        desc = descs_by_tag.get(desc_tag.tag_ident)
        if desc is None:
            raise pycdlibexception.PyCdlibInvalidISO('UDF Tag identifier not %d' % (desc_tag.tag_ident))
        desc.parse(vd_data[offset:offset + 512], current_extent, desc_tag)
        offset += block_size
        current_extent += 1
def _parse_udf_descriptors(self):
    # type: () -> None
    '''
    An internal method to parse the UDF descriptors on the ISO.  This
    should only be called if the ISO has a valid UDF Volume Recognition
    Sequence at the beginning of the ISO.

    Parameters:
     None.
    Returns:
     Nothing.
    '''
    block_size = self.pvd.logical_block_size()

    # Parse the anchors.  One Anchor Volume Descriptor Pointer is read
    # from extent 256 and one from the last extent of the ISO (2048 bytes
    # back from the end); both must be present.
    anchor_locations = [(256 * block_size, os.SEEK_SET), (-2048, os.SEEK_END)]
    for loc, whence in anchor_locations:
        self._cdfp.seek(loc, whence)
        extent = self._cdfp.tell() // 2048
        anchor_data = self._cdfp.read(2048)
        anchor_tag = udfmod.UDFTag()
        anchor_tag.parse(anchor_data, extent)
        if anchor_tag.tag_ident != 2:
            raise pycdlibexception.PyCdlibInvalidISO('UDF Anchor Tag identifier not 2')
        anchor = udfmod.UDFAnchorVolumeStructure()
        anchor.parse(anchor_data, extent, anchor_tag)
        self.udf_anchors.append(anchor)

    # Parse the Main Volume Descriptor Sequence
    self._parse_udf_vol_descs(self.udf_anchors[0].main_vd_extent,
                              self.udf_anchors[0].main_vd_length,
                              self.udf_main_descs)

    # Parse the Reserve Volume Descriptor Sequence
    self._parse_udf_vol_descs(self.udf_anchors[0].reserve_vd_extent,
                              self.udf_anchors[0].reserve_vd_length,
                              self.udf_reserve_descs)

    # Parse the Logical Volume Integrity Sequence: an integrity
    # descriptor (tag 9) followed by a terminator (tag 8).
    self._seek_to_extent(self.udf_main_descs.logical_volume.integrity_sequence_extent)
    integrity_data = self._cdfp.read(self.udf_main_descs.logical_volume.integrity_sequence_length)

    offset = 0
    current_extent = self.udf_main_descs.logical_volume.integrity_sequence_extent
    desc_tag = udfmod.UDFTag()
    desc_tag.parse(integrity_data[offset:], current_extent)
    if desc_tag.tag_ident != 9:
        raise pycdlibexception.PyCdlibInvalidISO('UDF Volume Integrity Tag identifier not 9')
    self.udf_logical_volume_integrity.parse(integrity_data[offset:offset + 512], current_extent, desc_tag)

    offset += block_size
    current_extent += 1
    desc_tag = udfmod.UDFTag()
    desc_tag.parse(integrity_data[offset:], current_extent)
    if desc_tag.tag_ident != 8:
        raise pycdlibexception.PyCdlibInvalidISO('UDF Logical Volume Integrity Terminator Tag identifier not 8')
    self.udf_logical_volume_integrity_terminator.parse(current_extent, desc_tag)

    # Now look for the File Set Descriptor at the start of the partition.
    current_extent = self.udf_main_descs.partition.part_start_location
    self._seek_to_extent(current_extent)
    # Read the data for the File Set and File Terminator together
    file_set_and_term_data = self._cdfp.read(2 * block_size)

    desc_tag = udfmod.UDFTag()
    # Tag locations inside the partition are partition-relative, so the
    # File Set Descriptor at the partition start parses at location 0.
    desc_tag.parse(file_set_and_term_data[:block_size], 0)
    if desc_tag.tag_ident != 256:
        raise pycdlibexception.PyCdlibInvalidISO('UDF File Set Tag identifier not 256')
    self.udf_file_set.parse(file_set_and_term_data[:block_size],
                            current_extent, desc_tag)

    current_extent += 1
    desc_tag = udfmod.UDFTag()
    desc_tag.parse(file_set_and_term_data[block_size:],
                   current_extent - self.udf_main_descs.partition.part_start_location)
    if desc_tag.tag_ident != 8:
        raise pycdlibexception.PyCdlibInvalidISO('UDF File Set Terminator Tag identifier not 8')
    self.udf_file_set_terminator.parse(current_extent, desc_tag)
def _parse_udf_file_entry(self, abs_file_entry_extent, icb, parent):
    # type: (int, udfmod.UDFLongAD, Optional[udfmod.UDFFileEntry]) -> Optional[udfmod.UDFFileEntry]
    '''
    An internal method to parse a single UDF File Entry and return the
    corresponding object.

    Parameters:
     abs_file_entry_extent - The absolute extent number at which the File
                             Entry starts.
     icb - The ICB object describing the File Entry data.
     parent - The parent of the UDF File Entry, or None for the root.
    Returns:
     A UDF File Entry object corresponding to the on-disk File Entry, or
     None if the on-disk File Entry is all zeros.
    '''
    self._seek_to_extent(abs_file_entry_extent)
    icbdata = self._cdfp.read(icb.extent_length)

    if all(v == 0 for v in bytearray(icbdata)):
        # We have seen ISOs in the wild (Windows 2008 Datacenter Enterprise
        # Standard SP2 x86 DVD) where the UDF File Identifier points to a
        # UDF File Entry of all zeros.  In those cases, we just keep the
        # File Identifier, and keep the UDF File Entry blank.
        return None

    desc_tag = udfmod.UDFTag()
    desc_tag.parse(icbdata, icb.log_block_num)
    if desc_tag.tag_ident != 261:
        raise pycdlibexception.PyCdlibInvalidISO('UDF File Entry Tag identifier not 261')

    file_entry = udfmod.UDFFileEntry()
    file_entry.parse(icbdata, abs_file_entry_extent, parent, desc_tag)

    return file_entry
def _open_fp(self, fp):
# type: (BinaryIO) -> None
'''
An internal method to open an existing ISO for inspection and
modification. Note that the file object passed in here must stay open
for the lifetime of this object, as the PyCdlib class uses it internally
to do writing and reading operations.
Parameters:
fp - The file object containing the ISO to open up.
Returns:
Nothing.
'''
if hasattr(fp, 'mode') and 'b' not in fp.mode:
raise pycdlibexception.PyCdlibInvalidInput("The file to open must be in binary mode (add 'b' to the open flags)")
self._cdfp = fp
# Get the Primary Volume Descriptor (pvd), the set of Supplementary
# Volume Descriptors (svds), the set of Volume Partition
# Descriptors (vpds), the set of Boot Records (brs), and the set of
# Volume Descriptor Set Terminators (vdsts)
self._parse_volume_descriptors()
old = self._cdfp.tell()
self._cdfp.seek(0)
tmp_mbr = isohybrid.IsoHybrid()
if tmp_mbr.parse(self._cdfp.read(512)):
# We only save the object if it turns out to be a valid IsoHybrid
self.isohybrid_mbr = tmp_mbr
self._cdfp.seek(old)
if self.pvd.application_use[141:149] == b'CD-XA001':
self.xa = True
for br in self.brs:
self._check_and_parse_eltorito(br)
# Now that we have the PVD, parse the Path Tables according to Ecma-119
# section 9.4. We want to ensure that the big endian versions agree
# with the little endian ones (to make sure it is a valid ISO).
# Little Endian first
le_ptrs, extent_to_ptr = self._parse_path_table(self.pvd.path_table_size(),
self.pvd.path_table_location_le)
# Big Endian next.
tmp_be_ptrs, e_unused = self._parse_path_table(self.pvd.path_table_size(),
self.pvd.path_table_location_be)
for index, ptr in enumerate(le_ptrs):
if not ptr.equal_to_be(tmp_be_ptrs[index]):
raise pycdlibexception.PyCdlibInvalidISO('Little-endian and big-endian path table records do not agree')
self.interchange_level = 1
for svd in self.svds:
if svd.version == 2 and svd.file_structure_version == 2:
self.interchange_level = 4
break
extent_to_inode = {} # type: Dict[int, inode.Inode]
# OK, so now that we have the PVD, we start at its root directory
# record and find all of the files
ic_level, lastbyte = self._walk_directories(self.pvd, extent_to_ptr,
extent_to_inode, le_ptrs)
self.interchange_level = max(self.interchange_level, ic_level)
# On El Torito ISOs, after we have walked the directories we look
# to see if all of the entries in El Torito have corresponding
# directory records. If they don't, then it may be the case that
# the El Torito bits of the system are 'hidden' or 'unlinked',
# meaning that they take up space but have no corresponding directory
# record in the ISO filesystem. In order to accommodate the rest
# of the system, which really expects these things to have directory
# records, we use fake directory records that don't get written out.
#
# Note that we specifically do *not* add these to any sort of parent;
# that way, we don't run afoul of any checks that adding a child to a
# parent might have. This means that if we do ever want to unhide this
# entry, we'll have to do some additional work to give it a real name
# and link it to the appropriate parent.
if self.eltorito_boot_catalog is not None:
self._link_eltorito(extent_to_inode)
# Now that everything has a dirrecord, see if we have a boot
# info table.
self._check_for_eltorito_boot_info_table(self.eltorito_boot_catalog.initial_entry.inode)
for sec in self.eltorito_boot_catalog.sections:
for entry in sec.section_entries:
self._check_for_eltorito_boot_info_table(entry.inode)
# The PVD is finished. Now look to see if we need to parse the SVD.
for svd in self.svds:
if (svd.flags & 0x1) == 0 and svd.escape_sequences[:3] in (b'%/@', b'%/C', b'%/E'):
if self.joliet_vd is not None:
raise pycdlibexception.PyCdlibInvalidISO('Only a single Joliet SVD is supported')
self.joliet_vd = svd
le_ptrs, joliet_extent_to_ptr = self._parse_path_table(svd.path_table_size(),
svd.path_table_location_le)
tmp_be_ptrs, j_unused = self._parse_path_table(svd.path_table_size(),
svd.path_table_location_be)
for index, ptr in enumerate(le_ptrs):
if not ptr.equal_to_be(tmp_be_ptrs[index]):
raise pycdlibexception.PyCdlibInvalidISO('Joliet little-endian and big-endian path table records do not agree')
self._walk_directories(svd, joliet_extent_to_ptr,
extent_to_inode, le_ptrs)
elif svd.version == 2 and svd.file_structure_version == 2:
if self.enhanced_vd is not None:
raise pycdlibexception.PyCdlibInvalidISO('Only a single enhanced VD is supported')
self.enhanced_vd = svd
# We've seen ISOs in the wild (Office XP) that have a PVD space size
# that is smaller than the location of the last directory record
# extent + length. If we see this, automatically update the size in the
# PVD (and any SVDs) so that subsequent operations will be correct.
log_block_size = self.pvd.logical_block_size()
if lastbyte > self.pvd.space_size * log_block_size:
new_pvd_size = utils.ceiling_div(lastbyte, log_block_size)
for pvd in self.pvds:
pvd.space_size = new_pvd_size
if self.joliet_vd is not None:
self.joliet_vd.space_size = new_pvd_size
if self.enhanced_vd is not None:
self.enhanced_vd.space_size = new_pvd_size
# Look to see if this is a UDF volume. It is one if we have a UDF BEA,
# UDF NSR, and UDF TEA, in which case we parse the UDF descriptors and
# walk the filesystem.
if self._has_udf:
self._parse_udf_descriptors()
self._walk_udf_directories(extent_to_inode)
# Now we look for the 'version' volume descriptor, common on ISOs made
# with genisoimage or mkisofs. This volume descriptor doesn't have any
# specification, but from code inspection, it is either a completely
# zero extent, or starts with 'MKI'. Further, it starts directly after
# the VDST, or directly after the UDF recognition sequence (if this is
# a UDF ISO). Thus, we go looking for it at those places, and add it
# if we find it there.
version_vd_extent = self.vdsts[0].extent_location() + 1
if self._has_udf:
version_vd_extent = self.udf_tea.extent_location() + 1
version_vd = headervd.VersionVolumeDescriptor()
self._cdfp.seek(version_vd_extent * log_block_size)
if version_vd.parse(self._cdfp.read(log_block_size), version_vd_extent):
self.version_vd = version_vd
self._initialized = True | [] |
def _get_and_write_fp(self, iso_path, outfp, blocksize):
    # type: (bytes, BinaryIO, int) -> None
    '''
    An internal method to fetch a single file from the ISO and write it out
    to the file object.

    Parameters:
     iso_path - The absolute path to the file to get data from.
     outfp - The file object to write data to.
     blocksize - The blocksize to use when copying data.
    Returns:
     Nothing.
    '''
    # Interpret the path first as Joliet, then as plain ISO9660, and finally
    # as Rock Ridge.  The first interpretation that succeeds wins; only the
    # final (Rock Ridge) attempt is allowed to propagate its exception.
    for (iso_arg, joliet_arg) in ((None, iso_path), (iso_path, None)):
        try:
            self._get_file_from_iso_fp(outfp, blocksize, iso_arg, None, joliet_arg)
            return
        except pycdlibexception.PyCdlibException:
            pass

    self._get_file_from_iso_fp(outfp, blocksize, None, iso_path, None)
def _udf_get_file_from_iso_fp(self, outfp, blocksize, udf_path):
    # type: (BinaryIO, int, bytes) -> None
    '''
    An internal method to fetch a single UDF file from the ISO and write it
    out to the file object.

    Parameters:
     outfp - The file object to write data to.
     blocksize - The number of bytes in each transfer.
     udf_path - The absolute UDF path to lookup on the ISO.
    Returns:
     Nothing.
    '''
    if self.udf_root is None:
        raise pycdlibexception.PyCdlibInvalidInput('Cannot fetch a udf_path from a non-UDF ISO')

    # Look the path up and validate that it names a real file with data.
    (unused_ident, entry) = self._find_udf_record(udf_path)
    if entry is None:
        raise pycdlibexception.PyCdlibInvalidInput('Cannot get the contents of an empty UDF File Entry')
    if not entry.is_file():
        raise pycdlibexception.PyCdlibInvalidInput('Can only write out a file')
    if entry.inode is None:
        raise pycdlibexception.PyCdlibInvalidInput('Cannot write out an entry without data')

    if entry.get_data_length() <= 0:
        # A zero-length file has no data to copy.
        return

    with inode.InodeOpenData(entry.inode, self.pvd.logical_block_size()) as (data_fp, data_len):
        utils.copy_data(data_len, blocksize, data_fp, outfp)
def _get_file_from_iso_fp(self, outfp, blocksize, iso_path, rr_path, joliet_path):
    # type: (BinaryIO, int, Optional[bytes], Optional[bytes], Optional[bytes]) -> None
    '''
    An internal method to fetch a single file from the ISO and write it out
    to the file object.

    Parameters:
     outfp - The file object to write data to.
     blocksize - The number of bytes in each transfer.
     iso_path - The absolute ISO9660 path to lookup on the ISO (exclusive
                with rr_path and joliet_path).
     rr_path - The absolute Rock Ridge path to lookup on the ISO (exclusive
               with iso_path and joliet_path).
     joliet_path - The absolute Joliet path to lookup on the ISO (exclusive
                   with iso_path and rr_path).
    Returns:
     Nothing.
    '''
    # Resolve the record using whichever one of the three path flavors was
    # supplied; exactly one must be non-None.
    if joliet_path is not None:
        if self.joliet_vd is None:
            raise pycdlibexception.PyCdlibInvalidInput('Cannot fetch a joliet_path from a non-Joliet ISO')
        found_record = self._find_joliet_record(joliet_path)
    elif rr_path is not None:
        if not self.rock_ridge:
            raise pycdlibexception.PyCdlibInvalidInput('Cannot fetch a rr_path from a non-Rock Ridge ISO')
        found_record = self._find_rr_record(rr_path)
    elif iso_path is not None:
        found_record = self._find_iso_record(iso_path)
    else:
        raise pycdlibexception.PyCdlibInternalError('Invalid path passed to get_file_from_iso_fp')

    if found_record.is_dir():
        raise pycdlibexception.PyCdlibInvalidInput('Cannot write out a directory')

    if rr_path is not None or iso_path is not None:
        if found_record.rock_ridge is not None and found_record.rock_ridge.is_symlink():
            # If this Rock Ridge record is a symlink, it has no data
            # associated with it, so it makes no sense to try and get the
            # data.  In theory, we could follow the symlink to the
            # appropriate place and get the data of the thing it points to.
            # However, Rock Ridge symlinks are allowed to point *outside*
            # of this ISO, so it is really not clear that this is something
            # we want to do.  For now we make the user follow the symlink
            # themselves if they want to get the data.  We can revisit this
            # decision in the future if we need to.
            raise pycdlibexception.PyCdlibInvalidInput('Symlinks have no data associated with them')

    if found_record.inode is None:
        raise pycdlibexception.PyCdlibInvalidInput('Cannot write out a file without data')

    # Large files may be split across several directory records chained via
    # data_continuation; copy each piece in turn.
    while found_record.get_data_length() > 0:
        with inode.InodeOpenData(found_record.inode, self.pvd.logical_block_size()) as (data_fp, data_len):
            # Here we copy the data into the output file descriptor.  If a boot
            # info table is present, we overlay the table over bytes 8-64 of the
            # file.  Note, however, that we never return more bytes than the length
            # of the file, so the boot info table may get truncated.
            if found_record.inode.boot_info_table is not None:
                header_len = min(data_len, 8)
                outfp.write(data_fp.read(header_len))
                data_len -= header_len
                if data_len > 0:
                    rec = found_record.inode.boot_info_table.record()
                    table_len = min(data_len, len(rec))
                    outfp.write(rec[:table_len])
                    data_len -= table_len
                    if data_len > 0:
                        # Skip past the region the boot info table replaced
                        # before copying the remainder of the file.
                        data_fp.seek(len(rec), os.SEEK_CUR)
                        utils.copy_data(data_len, blocksize, data_fp, outfp)
            else:
                utils.copy_data(data_len, blocksize, data_fp, outfp)

        if found_record.data_continuation is not None:
            found_record = found_record.data_continuation
        else:
            break
def _outfp_write_with_check(self, outfp, data, enable_overwrite_check=True):
    # type: (BinaryIO, bytes, bool) -> None
    '''
    Internal method to write data out to the output file descriptor,
    ensuring that it doesn't go beyond the bounds of the ISO.

    Parameters:
     outfp - The file object to write to.
     data - The actual data to write.
     enable_overwrite_check - Whether to do overwrite checking if it is
                              enabled.  Some pieces of code explicitly want
                              to overwrite data, so this allows them to
                              disable the checking.
    Returns:
     Nothing.
    '''
    write_start = outfp.tell()
    outfp.write(data)

    if not self._track_writes:
        return

    # After the write, double check that we didn't write beyond the
    # boundary of the PVD, and raise a PyCdlibException if we do.
    write_end = outfp.tell()
    iso_size = self.pvd.space_size * self.pvd.logical_block_size()
    if write_end > iso_size:
        raise pycdlibexception.PyCdlibInternalError('Wrote past the end of the ISO! (%d > %d)' % (write_end, iso_size))
    if enable_overwrite_check:
        bisect.insort_left(self._write_check_list, self._WriteRange(write_start, write_end - 1))
def _output_file_data(self, outfp, blocksize, ino):
    # type: (BinaryIO, int, inode.Inode) -> int
    '''
    Internal method to write a file's data (one Inode) out to the output
    file descriptor, zero-padded to a block boundary.

    Parameters:
     outfp - The file object to write the data to.
     blocksize - The blocksize to use when writing the data out.
     ino - The Inode to write.
    Returns:
     The total number of bytes written out.
    '''
    log_block_size = self.pvd.logical_block_size()

    outfp.seek(ino.extent_location() * log_block_size)
    tmp_start = outfp.tell()
    with inode.InodeOpenData(ino, log_block_size) as (data_fp, data_len):
        utils.copy_data(data_len, blocksize, data_fp, outfp)
        utils.zero_pad(outfp, data_len, log_block_size)

    if self._track_writes:
        # Record the written range (including padding) for overwrite
        # detection.
        end = outfp.tell()
        bisect.insort_left(self._write_check_list, self._WriteRange(tmp_start, end - 1))

    # If this file is being used as a bootfile, and the user
    # requested that the boot info table be patched into it,
    # we patch the boot info table at offset 8 here.
    if ino.boot_info_table is not None:
        old = outfp.tell()
        outfp.seek(tmp_start + 8)
        # Overwrite checking is disabled because this deliberately
        # overwrites data we just wrote above.
        self._outfp_write_with_check(outfp, ino.boot_info_table.record(),
                                     enable_overwrite_check=False)
        outfp.seek(old)
    return outfp.tell() - tmp_start
def _write_directory_records(self, vd, outfp, progress):
    # type: (headervd.PrimaryOrSupplementaryVD, BinaryIO, PyCdlib._Progress) -> None
    '''
    An internal method to write out the directory records (and the little-
    and big-endian Path Table Records) from a particular Volume Descriptor.

    Parameters:
     vd - The Volume Descriptor to write the Directory Records from.
     outfp - The file object to write data to.
     progress - The _Progress object to use for outputting progress.
    Returns:
     Nothing.
    '''
    log_block_size = vd.logical_block_size()
    # Running byte offsets into the little- and big-endian path tables;
    # each directory appends one PTR to each table as it is visited.
    le_ptr_offset = 0
    be_ptr_offset = 0
    # Breadth-first walk starting at the root directory record.
    dirs = collections.deque([vd.root_directory_record()])
    while dirs:
        curr = dirs.popleft()
        curr_dirrecord_offset = 0
        if curr.is_dir():
            if curr.ptr is None:
                raise pycdlibexception.PyCdlibInternalError('Directory has no Path Table Record')

            # Little Endian PTR
            outfp.seek(vd.path_table_location_le * log_block_size + le_ptr_offset)
            ret = curr.ptr.record_little_endian()
            self._outfp_write_with_check(outfp, ret)
            le_ptr_offset += len(ret)

            # Big Endian PTR
            outfp.seek(vd.path_table_location_be * log_block_size + be_ptr_offset)
            ret = curr.ptr.record_big_endian()
            self._outfp_write_with_check(outfp, ret)
            be_ptr_offset += len(ret)
            progress.call(curr.get_data_length())

        dir_extent = curr.extent_location()
        for child in curr.children:
            # No matter what type the child is, we need to first write
            # out the directory record entry.
            recstr = child.record()
            if (curr_dirrecord_offset + len(recstr)) > log_block_size:
                # Ecma-119 directory records may not span an extent
                # boundary, so start a fresh extent.
                dir_extent += 1
                curr_dirrecord_offset = 0
            outfp.seek(dir_extent * log_block_size + curr_dirrecord_offset)
            # Now write out the child
            self._outfp_write_with_check(outfp, recstr)
            curr_dirrecord_offset += len(recstr)

            if child.rock_ridge is not None:
                if child.rock_ridge.dr_entries.ce_record is not None:
                    # The child has a continue block, so write it out here.
                    ce_rec = child.rock_ridge.dr_entries.ce_record
                    outfp.seek(ce_rec.bl_cont_area * self.pvd.logical_block_size() + ce_rec.offset_cont_area)
                    rec = child.rock_ridge.record_ce_entries()
                    self._outfp_write_with_check(outfp, rec)
                    progress.call(len(rec))

                if child.rock_ridge.child_link_record_exists():
                    # A Rock Ridge child link is a placeholder for a
                    # relocated directory; do not descend into it.
                    continue

            if child.is_dir():
                # If the child is a directory, and is not dot or dotdot,
                # we want to descend into it to look at the children.
                if not child.is_dot() and not child.is_dotdot():
                    dirs.append(child)
def _write_udf_descs(self, descs, outfp, progress):
    # type: (PyCdlib._UDFDescriptors, BinaryIO, PyCdlib._Progress) -> None
    '''
    An internal method to write out a UDF Descriptor sequence.

    Parameters:
     descs - The UDF Descriptors object to write out.
     outfp - The output file descriptor to use for writing.
     progress - The _Progress object to use for updating progress.
    Returns:
     Nothing.
    '''
    log_block_size = self.pvd.logical_block_size()

    # Every descriptor in the sequence is handled identically: seek to its
    # extent, write its record, and update progress.  The order here
    # matches the on-disk descriptor sequence order.
    sequence = (descs.pvd, descs.impl_use, descs.partition,
                descs.logical_volume, descs.unallocated_space,
                descs.terminator)
    for desc in sequence:
        outfp.seek(desc.extent_location() * log_block_size)
        rec = desc.record()
        self._outfp_write_with_check(outfp, rec)
        progress.call(len(rec))
def _write_fp(self, outfp, blocksize, progress_cb, progress_opaque):
    # type: (BinaryIO, int, Optional[Callable[[int, int, Any], None]], Optional[Any]) -> None
    '''
    Write a properly formatted ISO out to the file object passed in.  This
    also goes by the name of 'mastering'.

    Parameters:
     outfp - The file object to write the data to.
     blocksize - The blocksize to use when copying data.
     progress_cb - If not None, a function to call as the write call does its
                   work.  The callback function must have a signature of:
                   def func(done, total) (per the type annotation, an
                   opaque third argument may also be accepted).
     progress_opaque - User data to be passed to the progress callback.
    Returns:
     Nothing.
    '''
    if hasattr(outfp, 'mode') and 'b' not in outfp.mode:
        raise pycdlibexception.PyCdlibInvalidInput("The file to write out must be in binary mode (add 'b' to the open flags)")

    if self._needs_reshuffle:
        self._reshuffle_extents()

    self._write_check_list = []
    outfp.seek(0)

    log_block_size = self.pvd.logical_block_size()

    progress = self._Progress(self.pvd.space_size * log_block_size, progress_cb, progress_opaque)
    progress.call(0)

    if self.isohybrid_mbr is not None:
        self._outfp_write_with_check(outfp,
                                     self.isohybrid_mbr.record(self.pvd.space_size * log_block_size))

    # Ecma-119, 6.2.1 says that the Volume Space is divided into a System
    # Area and a Data Area, where the System Area is in logical sectors 0
    # to 15, and whose contents is not specified by the standard.  Thus
    # we skip the first 16 sectors.
    outfp.seek(self.pvd.extent_location() * log_block_size)

    # First write out the PVD.
    for pvd in self.pvds:
        rec = pvd.record()
        self._outfp_write_with_check(outfp, rec)
        progress.call(len(rec))

    # Next write out the boot records.
    for br in self.brs:
        outfp.seek(br.extent_location() * log_block_size)
        rec = br.record()
        self._outfp_write_with_check(outfp, rec)
        progress.call(len(rec))

    # Next we write out the SVDs.
    for svd in self.svds:
        outfp.seek(svd.extent_location() * log_block_size)
        rec = svd.record()
        self._outfp_write_with_check(outfp, rec)
        progress.call(len(rec))

    # Next we write out the Volume Descriptor Terminators.
    for vdst in self.vdsts:
        outfp.seek(vdst.extent_location() * log_block_size)
        rec = vdst.record()
        self._outfp_write_with_check(outfp, rec)
        progress.call(len(rec))

    # Next we write out the UDF Volume Recognition sequence (if we are a
    # UDF ISO).
    if self._has_udf:
        outfp.seek(self.udf_bea.extent_location() * log_block_size)
        rec = self.udf_bea.record()
        self._outfp_write_with_check(outfp, rec)
        progress.call(len(rec))

        outfp.seek(self.udf_nsr.extent_location() * log_block_size)
        rec = self.udf_nsr.record()
        self._outfp_write_with_check(outfp, rec)
        progress.call(len(rec))

        outfp.seek(self.udf_tea.extent_location() * log_block_size)
        rec = self.udf_tea.record()
        self._outfp_write_with_check(outfp, rec)
        progress.call(len(rec))

    # Next we write out the version block if it exists.
    if self.version_vd is not None:
        outfp.seek(self.version_vd.extent_location() * log_block_size)
        rec = self.version_vd.record()
        self._outfp_write_with_check(outfp, rec)
        progress.call(len(rec))

    if self._has_udf:
        # Now the UDF Main and Reserved Volume Descriptor Sequence
        self._write_udf_descs(self.udf_main_descs, outfp, progress)
        self._write_udf_descs(self.udf_reserve_descs, outfp, progress)

        # Now the UDF Logical Volume Integrity Sequence (if there is one).
        outfp.seek(self.udf_logical_volume_integrity.extent_location() * log_block_size)
        rec = self.udf_logical_volume_integrity.record()
        self._outfp_write_with_check(outfp, rec)
        progress.call(len(rec))

        outfp.seek(self.udf_logical_volume_integrity_terminator.extent_location() * log_block_size)
        rec = self.udf_logical_volume_integrity_terminator.record()
        self._outfp_write_with_check(outfp, rec)
        progress.call(len(rec))

        # Now the UDF Anchor Points (if there are any).
        for anchor in self.udf_anchors:
            outfp.seek(anchor.extent_location() * log_block_size)
            rec = anchor.record()
            self._outfp_write_with_check(outfp, rec)
            progress.call(len(rec))

    # In theory, the Path Table Records (for both the PVD and SVD) get
    # written out next.  Since we store them along with the Directory
    # Records, however, we will write them out along with the directory
    # records instead.

    # Now write out the El Torito Boot Catalog if it exists.
    if self.eltorito_boot_catalog is not None:
        outfp.seek(self.eltorito_boot_catalog.extent_location() * log_block_size)
        rec = self.eltorito_boot_catalog.record()
        self._outfp_write_with_check(outfp, rec)
        progress.call(len(rec))

    # Now write out the ISO9660 directory records.
    self._write_directory_records(self.pvd, outfp, progress)

    # Now write out the Joliet directory records, if they exist.
    if self.joliet_vd is not None:
        self._write_directory_records(self.joliet_vd, outfp, progress)

    # Now write out the UDF directory records, if they exist.
    if self.udf_root is not None:
        # Write out the UDF File Sets
        outfp.seek(self.udf_file_set.extent_location() * log_block_size)
        rec = self.udf_file_set.record()
        self._outfp_write_with_check(outfp, rec)
        progress.call(len(rec))

        outfp.seek(self.udf_file_set_terminator.extent_location() * log_block_size)
        rec = self.udf_file_set_terminator.record()
        self._outfp_write_with_check(outfp, rec)
        progress.call(len(rec))

        # Breadth-first walk of the UDF File Entries, writing each File
        # Entry once per inode (hard links share an inode, so the id()
        # map prevents writing the same File Entry twice).
        written_file_entry_inodes = {}  # type: Dict[int, bool]
        udf_file_entries = collections.deque([(self.udf_root, True)])  # type: Deque[Tuple[Optional[udfmod.UDFFileEntry], bool]]
        while udf_file_entries:
            udf_file_entry, isdir = udf_file_entries.popleft()

            if udf_file_entry is None:
                continue

            if udf_file_entry.inode is None or not id(udf_file_entry.inode) in written_file_entry_inodes:
                outfp.seek(udf_file_entry.extent_location() * log_block_size)
                rec = udf_file_entry.record()
                self._outfp_write_with_check(outfp, rec)
                progress.call(len(rec))
                written_file_entry_inodes[id(udf_file_entry.inode)] = True

            if isdir:
                outfp.seek(udf_file_entry.fi_descs[0].extent_location() * log_block_size)
                # FIXME: for larger directories, we'll actually need to
                # iterate over the alloc_descs and write them
                for fi_desc in udf_file_entry.fi_descs:
                    rec = fi_desc.record()
                    self._outfp_write_with_check(outfp, rec)
                    progress.call(len(rec))
                    if not fi_desc.is_parent():
                        udf_file_entries.append((fi_desc.file_entry, fi_desc.is_dir()))

    # Now we need to write out the actual files.  Note that in many cases,
    # we haven't yet read the file out of the original, so we need to do
    # that here.
    for ino in self.inodes:
        if ino.get_data_length() > 0:
            progress.call(self._output_file_data(outfp, blocksize, ino))

    # We need to pad out to the total size of the disk, in the case that
    # the last thing we wrote is shorter than a full block size.  It turns
    # out that not all file-like objects allow you to use truncate() to
    # grow the file, so we do it the old-fashioned way by seeking to the
    # end - 1 and writing a padding '\x00' byte.
    outfp.seek(0, os.SEEK_END)
    total_size = self.pvd.space_size * log_block_size
    if outfp.tell() != total_size:
        outfp.seek(total_size - 1)
        outfp.write(b'\x00')

    if self.isohybrid_mbr is not None:
        outfp.seek(0, os.SEEK_END)
        # Note that we very specifically do not call
        # self._outfp_write_with_check here because this writes outside
        # the PVD boundaries.
        outfp.write(self.isohybrid_mbr.record_padding(self.pvd.space_size * log_block_size))

    progress.finish()
def _update_rr_ce_entry(self, rec):
    # type: (dr.DirectoryRecord) -> int
    '''
    An internal method to update the Rock Ridge CE entry for the given
    record.

    Parameters:
     rec - The record to update the Rock Ridge CE entry for (if it exists).
    Returns:
     The number of additional bytes needed for this Rock Ridge CE entry.
    '''
    rr = rec.rock_ridge
    if rr is None or rr.dr_entries.ce_record is None:
        # Without a Rock Ridge CE record there is nothing to update and no
        # additional space is required.
        return 0

    # Reserve space in the PVD's continuation area and point the CE record
    # at the block/offset it was assigned.
    celen = rr.dr_entries.ce_record.len_cont_area
    added_block, block, offset = self.pvd.add_rr_ce_entry(celen)
    rr.update_ce_block(block)
    rr.dr_entries.ce_record.update_offset(offset)

    if added_block:
        # A brand-new continuation block was allocated; the ISO grows by
        # one logical block.
        return self.pvd.logical_block_size()
    return 0
def _finish_add(self, num_bytes_to_add, num_partition_bytes_to_add):
    # type: (int, int) -> None
    '''
    An internal method to do all of the accounting needed whenever
    something is added to the ISO.  This method should only be called by
    public API implementations.

    Parameters:
     num_bytes_to_add - The number of additional bytes to add to all
                        descriptors.
     num_partition_bytes_to_add - The number of additional bytes to add to
                                  the partition if this is a UDF file.
    Returns:
     Nothing.
    '''
    # The volume descriptors account for both the plain and the
    # partition bytes.
    total_bytes = num_bytes_to_add + num_partition_bytes_to_add
    for pvd in self.pvds:
        pvd.add_to_space_size(total_bytes)
    if self.joliet_vd is not None:
        self.joliet_vd.add_to_space_size(total_bytes)

    if self.enhanced_vd is not None:
        self.enhanced_vd.copy_sizes(self.pvd)

    # Only the partition bytes grow the UDF partition accounting.
    if self.udf_root is not None:
        num_extents_to_add = utils.ceiling_div(num_partition_bytes_to_add,
                                               self.pvd.logical_block_size())
        self.udf_main_descs.partition.part_length += num_extents_to_add
        self.udf_reserve_descs.partition.part_length += num_extents_to_add
        self.udf_logical_volume_integrity.size_table += num_extents_to_add

    if self._always_consistent:
        self._reshuffle_extents()
    else:
        self._needs_reshuffle = True
def _finish_remove(self, num_bytes_to_remove, is_partition):
    # type: (int, bool) -> None
    '''
    An internal method to do all of the accounting needed whenever
    something is removed from the ISO.  This method should only be called
    by public API implementations.

    Parameters:
     num_bytes_to_remove - The number of additional bytes to remove from
                           the descriptors.
     is_partition - Whether these bytes are part of a UDF partition.
    Returns:
     Nothing.
    '''
    # Shrink every primary descriptor (and the Joliet SVD, if any) by the
    # removed byte count.
    for desc in self.pvds:
        desc.remove_from_space_size(num_bytes_to_remove)
    if self.joliet_vd is not None:
        self.joliet_vd.remove_from_space_size(num_bytes_to_remove)

    if self.enhanced_vd is not None:
        self.enhanced_vd.copy_sizes(self.pvd)

    # UDF partition accounting only shrinks when the bytes lived inside
    # the partition.
    if self.udf_root is not None and is_partition:
        removed_extents = utils.ceiling_div(num_bytes_to_remove,
                                            self.pvd.logical_block_size())
        self.udf_main_descs.partition.part_length -= removed_extents
        self.udf_reserve_descs.partition.part_length -= removed_extents
        self.udf_logical_volume_integrity.size_table -= removed_extents

    if self._always_consistent:
        self._reshuffle_extents()
    else:
        self._needs_reshuffle = True
def _add_hard_link_to_rec(self, old_rec, boot_catalog_old, **kwargs):
    # type: (Any, bool, str) -> int
    '''
    Add a hard link to the ISO.  Hard links are alternate names for the
    same file contents that don't take up any additional space on the ISO.
    This API can be used to create hard links between two files on the
    ISO9660 filesystem, between two files on the Joliet filesystem, or
    between a file on the ISO9660 filesystem and the Joliet filesystem.
    In all cases, exactly one old path must be specified, and exactly one
    new path must be specified.

    Parameters:
     old_rec - The old record to link against.
     boot_catalog_old - Whether this is a link to an old boot catalog.
     iso_new_path - The new path on the ISO9660 filesystem to link to.
     joliet_new_path - The new path on the Joliet filesystem to link to.
     rr_name - The Rock Ridge name to use for the new file if this is a
               Rock Ridge ISO and the new path is on the ISO9660 filesystem.
     udf_new_path - The new path on the UDF filesystem to link to.
    Returns:
     The number of bytes to add to the descriptors.
    '''
    # Parse the keyword arguments; exactly one *_new_path may be given.
    num_new = 0
    iso_new_path = None
    joliet_new_path = None
    rr_name = b''
    udf_new_path = None
    new_rec = None  # type: Optional[Union[dr.DirectoryRecord, udfmod.UDFFileEntry]]
    for key in kwargs:
        if key == 'iso_new_path' and kwargs[key] is not None:
            num_new += 1
            iso_new_path = utils.normpath(kwargs[key])
            if not self.rock_ridge:
                _check_path_depth(iso_new_path)
        elif key == 'joliet_new_path' and kwargs[key] is not None:
            num_new += 1
            joliet_new_path = self._normalize_joliet_path(kwargs[key])
        elif key == 'rr_name' and kwargs[key] is not None:
            rr_name = self._check_rr_name(kwargs[key])
        elif key == 'udf_new_path' and kwargs[key] is not None:
            num_new += 1
            udf_new_path = utils.normpath(kwargs[key])
        else:
            raise pycdlibexception.PyCdlibInvalidInput('Unknown keyword %s' % (key))

    if num_new != 1:
        raise pycdlibexception.PyCdlibInvalidInput('Exactly one new path must be specified')
    if self.rock_ridge and iso_new_path is not None and not rr_name:
        raise pycdlibexception.PyCdlibInvalidInput('Rock Ridge name must be supplied for a Rock Ridge new path')

    data_ino = old_rec.inode

    num_bytes_to_add = 0
    if udf_new_path is None:
        # The new link lives on the ISO9660 or Joliet filesystem, so it
        # gets a new DirectoryRecord.
        file_mode = -1
        if iso_new_path is not None:
            # ... to another file on the ISO9660 filesystem.
            (new_name, new_parent) = self._iso_name_and_parent_from_path(iso_new_path)
            vd = self.pvd
            rr = self.rock_ridge
            xa = self.xa
            if self.rock_ridge:
                file_mode = old_rec.rock_ridge.get_file_mode()
        elif joliet_new_path is not None:
            if self.joliet_vd is None:
                raise pycdlibexception.PyCdlibInternalError('Tried to link to Joliet record on non-Joliet ISO')
            # ... to a file on the Joliet filesystem.
            (new_name, new_parent) = self._joliet_name_and_parent_from_path(joliet_new_path)
            vd = self.joliet_vd
            rr = ''
            xa = False
        # Above we checked to make sure we got at least one new path, so we
        # don't need to worry about the else situation here.

        new_rec = dr.DirectoryRecord()
        new_rec.new_file(vd, old_rec.get_data_length(), new_name,
                         new_parent, vd.sequence_number(), rr, rr_name, xa,
                         file_mode)

        num_bytes_to_add += self._add_child_to_dr(new_rec,
                                                  vd.logical_block_size())
    else:
        if self.udf_root is None:
            raise pycdlibexception.PyCdlibInvalidInput('Can only specify a udf_path for a UDF ISO')

        log_block_size = self.pvd.logical_block_size()

        # UDF new path: the link needs both a File Identifier Descriptor
        # and a File Entry.
        (udf_name, udf_parent) = self._udf_name_and_parent_from_path(udf_new_path)

        file_ident = udfmod.UDFFileIdentifierDescriptor()
        file_ident.new(False, False, udf_name, udf_parent)
        num_new_extents = udf_parent.add_file_ident_desc(file_ident, log_block_size)
        num_bytes_to_add += num_new_extents * log_block_size

        file_entry = udfmod.UDFFileEntry()
        file_entry.new(old_rec.get_data_length(), 'file', udf_parent,
                       log_block_size)
        file_ident.file_entry = file_entry
        file_entry.file_ident = file_ident
        # The File Entry itself only takes up a new extent the first time
        # this inode is linked into the UDF filesystem.
        if data_ino is None or data_ino.num_udf == 0:
            num_bytes_to_add += log_block_size

        if data_ino is not None:
            data_ino.num_udf += 1

        new_rec = file_entry

        self.udf_logical_volume_integrity.logical_volume_impl_use.num_files += 1

    if data_ino is not None and new_rec is not None:
        # Cross-link the inode and the new record so the data is shared.
        data_ino.linked_records.append(new_rec)
        new_rec.inode = data_ino

    if boot_catalog_old and new_rec is not None:
        if self.eltorito_boot_catalog is None:
            raise pycdlibexception.PyCdlibInternalError('Tried to link to El Torito on non-El Torito ISO')
        self.eltorito_boot_catalog.add_dirrecord(new_rec)

    return num_bytes_to_add
Please provide a description of the function:def _add_fp(self, fp, length, manage_fp, old_iso_path, orig_rr_name, joliet_path,
udf_path, file_mode, eltorito_catalog):
# type: (Optional[BinaryIO], int, bool, str, Optional[str], Optional[str], Optional[str], Optional[int], bool) -> int
'''
An internal method to add a file to the ISO. If the ISO contains Rock
Ridge, then a Rock Ridge name must be provided. If the ISO contains
Joliet, then a Joliet path is not required but is highly recommended.
Note that the caller must ensure that the file remains open for the
lifetime of the ISO object, as the PyCdlib class uses the file
descriptor internally when writing (mastering) the ISO.
Parameters:
fp - The file object to use for the contents of the new file.
length - The length of the data for the new file.
manage_fp - Whether or not pycdlib should internally manage the file
pointer. It is faster to manage the file pointer
externally, but it is more convenient to have pycdlib do it
internally.
old_iso_path - The ISO9660 absolute path to the file destination on the ISO.
orig_rr_name - The Rock Ridge name of the file destination on the ISO.
joliet_path - The Joliet absolute path to the file destination on the ISO.
udf_path - The UDF absolute path to the file destination on the ISO.
file_mode - The POSIX file_mode to apply to this file. This only
applies if this is a Rock Ridge ISO. If this is None (the
default), the permissions from the original file are used.
eltorito_catalog - Whether this entry represents an El Torito Boot
Catalog.
Returns:
The number of bytes to add to the descriptors.
'''
iso_path = utils.normpath(old_iso_path)
rr_name = self._check_rr_name(orig_rr_name)
# We call _normalize_joliet_path here even though we aren't going to
# use the result. This is to ensure that we throw an exception when
# a joliet_path is passed for a non-Joliet ISO.
if joliet_path:
self._normalize_joliet_path(joliet_path)
if udf_path and self.udf_root is None:
raise pycdlibexception.PyCdlibInvalidInput('Can only specify a UDF path for a UDF ISO')
if not self.rock_ridge:
_check_path_depth(iso_path)
(name, parent) = self._iso_name_and_parent_from_path(iso_path)
_check_iso9660_filename(name, self.interchange_level)
fmode = 0
if file_mode is not None:
if not self.rock_ridge:
raise pycdlibexception.PyCdlibInvalidInput('Can only specify a file mode for Rock Ridge ISOs')
fmode = file_mode
else:
if self.rock_ridge:
if fp is not None:
# Python 3 implements the fileno method for all file-like objects, so
# we can't just use the existence of the method to tell whether it is
# available. Instead, we try to assign it, and if we fail, then we
# assume it is not available.
try:
fileno = fp.fileno()
fmode = os.fstat(fileno).st_mode
except (AttributeError, io.UnsupportedOperation):
# We couldn't get the actual file mode of the file, so just assume
# a conservative 444
fmode = 0o0100444
else:
fmode = 0o0100444
left = length
offset = 0
done = False
num_bytes_to_add = 0
first_rec = None
while not done:
# The maximum length we allow in one directory record is 0xfffff800
# (this is taken from xorriso, though I don't really know why).
thislen = min(left, 0xfffff800)
rec = dr.DirectoryRecord()
rec.new_file(self.pvd, thislen, name, parent,
self.pvd.sequence_number(), self.rock_ridge, rr_name,
self.xa, fmode)
num_bytes_to_add += self._add_child_to_dr(rec,
self.pvd.logical_block_size())
# El Torito Boot Catalogs have no inode, so only add it if this is
# not a boot catalog.
if eltorito_catalog:
if self.eltorito_boot_catalog is None:
raise pycdlibexception.PyCdlibInternalError('Tried to add to a non-existent boot catalog')
if offset == 0:
self.eltorito_boot_catalog.add_dirrecord(rec)
else:
# Zero-length files get a directory record but no Inode (there
# is nothing to write out).
if fp is not None:
ino = inode.Inode()
ino.new(thislen, fp, manage_fp, offset)
ino.linked_records.append(rec)
rec.inode = ino
self.inodes.append(ino)
num_bytes_to_add += thislen
if first_rec is None:
first_rec = rec
left -= thislen
offset += thislen
if left == 0:
done = True
num_bytes_to_add += self._update_rr_ce_entry(rec)
if self.joliet_vd is not None and joliet_path:
# If this is a Joliet ISO, then we can re-use add_hard_link to do
# most of the work.
num_bytes_to_add += self._add_hard_link_to_rec(first_rec, eltorito_catalog,
joliet_new_path=joliet_path)
if udf_path:
num_bytes_to_add += self._add_hard_link_to_rec(first_rec, eltorito_catalog,
udf_new_path=udf_path)
return num_bytes_to_add | [] |
Please provide a description of the function:def _rm_dr_link(self, rec):
# type: (dr.DirectoryRecord) -> int
'''
An internal method to remove a Directory Record link given the record.
Parameters:
rec - The Directory Record to remove.
Returns:
The number of bytes to remove from the ISO.
'''
if not rec.is_file():
raise pycdlibexception.PyCdlibInvalidInput('Cannot remove a directory with rm_hard_link (try rm_directory instead)')
num_bytes_to_remove = 0
logical_block_size = rec.vd.logical_block_size()
done = False
while not done:
num_bytes_to_remove += self._remove_child_from_dr(rec,
rec.index_in_parent,
logical_block_size)
if rec.inode is not None:
found_index = None
for index, link in enumerate(rec.inode.linked_records):
if id(link) == id(rec):
found_index = index
break
else:
# This should never happen
raise pycdlibexception.PyCdlibInternalError('Could not find inode corresponding to record')
del rec.inode.linked_records[found_index]
# We only remove the size of the child from the ISO if there are no
# other references to this file on the ISO.
if not rec.inode.linked_records:
found_index = None
for index, ino in enumerate(self.inodes):
if id(ino) == id(rec.inode):
found_index = index
break
else:
# This should never happen
raise pycdlibexception.PyCdlibInternalError('Could not find inode corresponding to record')
del self.inodes[found_index]
num_bytes_to_remove += rec.get_data_length()
if rec.data_continuation is not None:
rec = rec.data_continuation
else:
done = True
return num_bytes_to_remove | [] |
Please provide a description of the function:def _rm_udf_file_ident(self, parent, fi):
# type: (udfmod.UDFFileEntry, bytes) -> int
'''
An internal method to remove a UDF File Identifier from the parent
and remove any space from the Logical Volume as necessary.
Parameters:
parent - The parent entry to remove the UDF File Identifier from.
fi - The file identifier to remove.
Returns:
The number of bytes to remove from the ISO.
'''
logical_block_size = self.pvd.logical_block_size()
num_extents_to_remove = parent.remove_file_ident_desc_by_name(fi,
logical_block_size)
self.udf_logical_volume_integrity.logical_volume_impl_use.num_files -= 1
self._find_udf_record.cache_clear() # pylint: disable=no-member
return num_extents_to_remove * logical_block_size | [] |
Please provide a description of the function:def _rm_udf_link(self, rec):
# type: (udfmod.UDFFileEntry) -> int
'''
An internal method to remove a UDF File Entry link.
Parameters:
rec - The UDF File Entry to remove.
Returns:
The number of bytes to remove from the ISO.
'''
if not rec.is_file() and not rec.is_symlink():
raise pycdlibexception.PyCdlibInvalidInput('Cannot remove a directory with rm_hard_link (try rm_directory instead)')
# To remove something from UDF, we have to:
# 1. Remove it from the list of linked_records on the Inode.
# 2. If the number of links to the Inode is now 0, remove the Inode.
# 3. If the number of links to the UDF File Entry this uses is 0,
# remove the UDF File Entry.
# 4. Remove the UDF File Identifier from the parent.
logical_block_size = self.pvd.logical_block_size()
num_bytes_to_remove = 0
if rec.inode is not None:
# Step 1.
found_index = None
for index, link in enumerate(rec.inode.linked_records):
if id(link) == id(rec):
found_index = index
break
else:
# This should never happen
raise pycdlibexception.PyCdlibInternalError('Could not find inode corresponding to record')
del rec.inode.linked_records[found_index]
rec.inode.num_udf -= 1
# Step 2.
if not rec.inode.linked_records:
found_index = None
for index, ino in enumerate(self.inodes):
if id(ino) == id(rec.inode):
found_index = index
break
else:
# This should never happen
raise pycdlibexception.PyCdlibInternalError('Could not find inode corresponding to record')
del self.inodes[found_index]
num_bytes_to_remove += rec.get_data_length()
# Step 3.
if rec.inode.num_udf == 0:
num_bytes_to_remove += logical_block_size
else:
# If rec.inode is None, then we are just removing the UDF File
# Entry.
num_bytes_to_remove += logical_block_size
# Step 4.
if rec.parent is None:
raise pycdlibexception.PyCdlibInternalError('Cannot remove a UDF record with no parent')
if rec.file_ident is None:
raise pycdlibexception.PyCdlibInternalError('Cannot remove a UDF record with no file identifier')
return num_bytes_to_remove + self._rm_udf_file_ident(rec.parent, rec.file_ident.fi) | [] |
Please provide a description of the function:def _add_joliet_dir(self, joliet_path):
# type: (bytes) -> int
'''
An internal method to add a joliet directory to the ISO.
Parameters:
joliet_path - The path to add to the Joliet portion of the ISO.
Returns:
The number of additional bytes needed on the ISO to fit this directory.
'''
if self.joliet_vd is None:
raise pycdlibexception.PyCdlibInternalError('Tried to add joliet dir to non-Joliet ISO')
(joliet_name, joliet_parent) = self._joliet_name_and_parent_from_path(joliet_path)
log_block_size = self.joliet_vd.logical_block_size()
rec = dr.DirectoryRecord()
rec.new_dir(self.joliet_vd, joliet_name, joliet_parent,
self.joliet_vd.sequence_number(), '', b'',
log_block_size, False, False,
False, -1)
num_bytes_to_add = self._add_child_to_dr(rec, log_block_size)
self._create_dot(self.joliet_vd, rec, '', False, -1)
self._create_dotdot(self.joliet_vd, rec, '', False, False, -1)
num_bytes_to_add += log_block_size
if self.joliet_vd.add_to_ptr_size(path_table_record.PathTableRecord.record_length(len(joliet_name))):
num_bytes_to_add += 4 * log_block_size
# We always need to add an entry to the path table record
ptr = path_table_record.PathTableRecord()
ptr.new_dir(joliet_name)
rec.set_ptr(ptr)
return num_bytes_to_add | [] |
Please provide a description of the function:def _rm_joliet_dir(self, joliet_path):
# type: (bytes) -> int
'''
An internal method to remove a directory from the Joliet portion of the ISO.
Parameters:
joliet_path - The Joliet directory to remove.
Returns:
The number of bytes to remove from the ISO for this Joliet directory.
'''
if self.joliet_vd is None:
raise pycdlibexception.PyCdlibInternalError('Tried to remove joliet dir from non-Joliet ISO')
log_block_size = self.joliet_vd.logical_block_size()
joliet_child = self._find_joliet_record(joliet_path)
num_bytes_to_remove = joliet_child.get_data_length()
num_bytes_to_remove += self._remove_child_from_dr(joliet_child,
joliet_child.index_in_parent,
log_block_size)
if joliet_child.ptr is None:
raise pycdlibexception.PyCdlibInternalError('Joliet directory has no path table record; this should not be')
if self.joliet_vd.remove_from_ptr_size(path_table_record.PathTableRecord.record_length(joliet_child.ptr.len_di)):
num_bytes_to_remove += 4 * log_block_size
return num_bytes_to_remove | [] |
Please provide a description of the function:def _get_entry(self, iso_path, rr_path, joliet_path):
# type: (Optional[bytes], Optional[bytes], Optional[bytes]) -> dr.DirectoryRecord
'''
Internal method to get the directory record for a particular path.
Parameters:
iso_path - The path on the ISO filesystem to look up the record for.
rr_path - The Rock Ridge path on the ISO filesystem to look up the
record for.
joliet_path - The path on the Joliet filesystem to look up the record
for.
Returns:
A dr.DirectoryRecord object representing the path.
'''
if self._needs_reshuffle:
self._reshuffle_extents()
rec = None
if joliet_path is not None:
rec = self._find_joliet_record(joliet_path)
elif rr_path is not None:
rec = self._find_rr_record(rr_path)
elif iso_path is not None:
rec = self._find_iso_record(iso_path)
else:
raise pycdlibexception.PyCdlibInternalError('get_entry called without legal argument')
return rec | [] |
Please provide a description of the function:def _get_udf_entry(self, udf_path):
# type: (str) -> udfmod.UDFFileEntry
'''
Internal method to get the UDF File Entry for a particular path.
Parameters:
udf_path - The path on the UDF filesystem to look up the record for.
Returns:
A udfmod.UDFFileEntry object representing the path.
'''
if self._needs_reshuffle:
self._reshuffle_extents()
(ident_unused, rec) = self._find_udf_record(utils.normpath(udf_path))
if rec is None:
raise pycdlibexception.PyCdlibInvalidInput('Cannot get entry for empty UDF File Entry')
return rec | [] |
Please provide a description of the function:def _create_dot(self, vd, parent, rock_ridge, xa, file_mode):
# type: (headervd.PrimaryOrSupplementaryVD, dr.DirectoryRecord, str, bool, int) -> None
'''
An internal method to create a new 'dot' Directory Record.
Parameters:
vd - The volume descriptor to attach the 'dot' Directory Record to.
parent - The parent Directory Record for new Directory Record.
rock_ridge - The Rock Ridge version to use for this entry (if any).
xa - Whether this Directory Record should have extended attributes.
file_mode - The mode to assign to the dot directory (only applies to Rock Ridge).
Returns:
Nothing.
'''
dot = dr.DirectoryRecord()
dot.new_dot(vd, parent, vd.sequence_number(), rock_ridge,
vd.logical_block_size(), xa, file_mode)
self._add_child_to_dr(dot, vd.logical_block_size()) | [] |
Please provide a description of the function:def _create_dotdot(self, vd, parent, rock_ridge, relocated, xa, file_mode):
# type: (headervd.PrimaryOrSupplementaryVD, dr.DirectoryRecord, str, bool, bool, int) -> dr.DirectoryRecord
'''
An internal method to create a new 'dotdot' Directory Record.
Parameters:
vd - The volume descriptor to attach the 'dotdot' Directory Record to.
parent - The parent Directory Record for new Directory Record.
rock_ridge - The Rock Ridge version to use for this entry (if any).
relocated - Whether this Directory Record is a Rock Ridge relocated entry.
xa - Whether this Directory Record should have extended attributes.
file_mode - The mode to assign to the dot directory (only applies to Rock Ridge).
Returns:
Nothing.
'''
dotdot = dr.DirectoryRecord()
dotdot.new_dotdot(vd, parent, vd.sequence_number(), rock_ridge,
vd.logical_block_size(), relocated, xa, file_mode)
self._add_child_to_dr(dotdot, vd.logical_block_size())
return dotdot | [] |
Please provide a description of the function:def new(self, interchange_level=1, sys_ident='', vol_ident='', set_size=1,
seqnum=1, log_block_size=2048, vol_set_ident=' ', pub_ident_str='',
preparer_ident_str='', app_ident_str='', copyright_file='',
abstract_file='', bibli_file='', vol_expire_date=None, app_use='',
joliet=None, rock_ridge=None, xa=False, udf=None):
# type: (int, str, str, int, int, int, str, str, str, str, str, str, str, Optional[float], str, Optional[int], Optional[str], bool, Optional[str]) -> None
'''
Create a new ISO from scratch.
Parameters:
interchange_level - The ISO9660 interchange level to use; this dictates
the rules on the names of files. Levels 1, 2, 3,
and 4 are supported. Level 1 is the most
conservative, and is the default, but level 3 is
recommended.
sys_ident - The system identification string to use on the new ISO.
vol_ident - The volume identification string to use on the new ISO.
set_size - The size of the set of ISOs this ISO is a part of.
seqnum - The sequence number of the set of this ISO.
log_block_size - The logical block size to use for the ISO. While ISO9660
technically supports sizes other than 2048 (the default),
this almost certainly doesn't work.
vol_set_ident - The volume set identification string to use on the new ISO.
pub_ident_str - The publisher identification string to use on the new ISO.
preparer_ident_str - The preparer identification string to use on the new ISO.
app_ident_str - The application identification string to use on the new ISO.
copyright_file - The name of a file at the root of the ISO to use as the
copyright file.
abstract_file - The name of a file at the root of the ISO to use as the
abstract file.
bibli_file - The name of a file at the root of the ISO to use as the
bibliographic file.
vol_expire_date - The date that this ISO will expire at.
app_use - Arbitrary data that the application can stuff into the primary
volume descriptor of this ISO.
joliet - A integer that can have the value 1, 2, or 3 for Joliet
levels 1, 2, or 3 (3 is by far the most common), or None for
no Joliet support (the default). For legacy reasons, this
parameter also accepts a boolean, where the value of 'False'
means no Joliet and a value of 'True' means level 3.
rock_ridge - Whether to make this ISO have the Rock Ridge extensions or
not. The default value of None does not add Rock Ridge
extensions. A string value of '1.09', '1.10', or '1.12'
adds the specified Rock Ridge version to the ISO. If
unsure, pass '1.09' to ensure maximum compatibility.
xa - Whether to add the ISO9660 Extended Attribute extensions to this
ISO. The default is False.
udf - Whether to add UDF support to this ISO. If it is None (the
default), no UDF support is added. If it is "2.60", version 2.60
of the UDF spec is used. All other values are disallowed.
Returns:
Nothing.
'''
# Start out with argument checking.
if self._initialized:
raise pycdlibexception.PyCdlibInvalidInput('This object already has an ISO; either close it or create a new object')
if interchange_level < 1 or interchange_level > 4:
raise pycdlibexception.PyCdlibInvalidInput('Invalid interchange level (must be between 1 and 4)')
if rock_ridge and rock_ridge not in ['1.09', '1.10', '1.12']:
raise pycdlibexception.PyCdlibInvalidInput('Rock Ridge value must be None (no Rock Ridge), 1.09, 1.10, or 1.12')
if udf and udf != '2.60':
raise pycdlibexception.PyCdlibInvalidInput('UDF value must be empty (no UDF), or 2.60')
# Now save off the arguments we need to keep around.
if not app_ident_str:
app_ident_str = 'PyCdlib (C) 2015-2018 Chris Lalancette'
self.interchange_level = interchange_level
self.xa = xa
if isinstance(joliet, bool):
if joliet:
joliet = 3
else:
joliet = None
if rock_ridge:
self.rock_ridge = rock_ridge
sys_ident_bytes = sys_ident.encode('utf-8')
vol_ident_bytes = vol_ident.encode('utf-8')
vol_set_ident_bytes = vol_set_ident.encode('utf-8')
pub_ident_bytes = pub_ident_str.encode('utf-8')
preparer_ident_bytes = preparer_ident_str.encode('utf-8')
app_ident_bytes = app_ident_str.encode('utf-8')
copyright_file_bytes = copyright_file.encode('utf-8')
abstract_file_bytes = abstract_file.encode('utf-8')
bibli_file_bytes = bibli_file.encode('utf-8')
app_use_bytes = app_use.encode('utf-8')
if vol_expire_date is None:
real_vol_expire_date = 0.0
else:
real_vol_expire_date = vol_expire_date
# Now start creating the ISO.
self.pvd = headervd.pvd_factory(sys_ident_bytes, vol_ident_bytes,
set_size, seqnum, log_block_size,
vol_set_ident_bytes, pub_ident_bytes,
preparer_ident_bytes, app_ident_bytes,
copyright_file_bytes,
abstract_file_bytes, bibli_file_bytes,
real_vol_expire_date, app_use_bytes, xa)
self.pvds.append(self.pvd)
pvd_log_block_size = self.pvd.logical_block_size()
num_bytes_to_add = 0
if self.interchange_level == 4:
self.enhanced_vd = headervd.enhanced_vd_factory(sys_ident_bytes,
vol_ident_bytes,
set_size, seqnum,
log_block_size,
vol_set_ident_bytes,
pub_ident_bytes,
preparer_ident_bytes,
app_ident_bytes,
copyright_file_bytes,
abstract_file_bytes,
bibli_file_bytes,
real_vol_expire_date,
app_use_bytes, xa)
self.svds.append(self.enhanced_vd)
num_bytes_to_add += self.enhanced_vd.logical_block_size()
if joliet is not None:
self.joliet_vd = headervd.joliet_vd_factory(joliet, sys_ident_bytes,
vol_ident_bytes, set_size,
seqnum, log_block_size,
vol_set_ident_bytes,
pub_ident_bytes,
preparer_ident_bytes,
app_ident_bytes,
copyright_file_bytes,
abstract_file_bytes,
bibli_file_bytes,
real_vol_expire_date,
app_use_bytes, xa)
self.svds.append(self.joliet_vd)
# Now that we have added joliet, we need to add the new space to the
# PVD for the VD itself.
num_bytes_to_add += self.joliet_vd.logical_block_size()
self.vdsts.append(headervd.vdst_factory())
num_bytes_to_add += pvd_log_block_size
if udf:
self._has_udf = True
# Create the Bridge Recognition Volume Sequence
self.udf_bea.new()
self.udf_nsr.new(2)
self.udf_tea.new()
num_bytes_to_add += 3 * pvd_log_block_size
# We always create an empty version volume descriptor
self.version_vd = headervd.version_vd_factory(pvd_log_block_size)
num_bytes_to_add += pvd_log_block_size
if udf:
# We need to pad out to extent 32. The padding should be the
# distance between the current PVD space size and 32.
additional_extents = 32 - (self.pvd.space_size + (num_bytes_to_add // pvd_log_block_size))
num_bytes_to_add += additional_extents * pvd_log_block_size
# Create the Main Volume Descriptor Sequence
self.udf_main_descs.pvd.new()
self.udf_main_descs.impl_use.new()
self.udf_main_descs.partition.new()
self.udf_main_descs.logical_volume.new()
self.udf_main_descs.unallocated_space.new()
self.udf_main_descs.terminator.new()
num_bytes_to_add += 16 * pvd_log_block_size
# Create the Reserve Volume Descriptor Sequence
self.udf_reserve_descs.pvd.new()
self.udf_reserve_descs.impl_use.new()
self.udf_reserve_descs.partition.new()
self.udf_reserve_descs.logical_volume.new()
self.udf_reserve_descs.unallocated_space.new()
self.udf_reserve_descs.terminator.new()
num_bytes_to_add += 16 * pvd_log_block_size
# Create the Logical Volume Integrity Sequence
self.udf_logical_volume_integrity.new()
self.udf_logical_volume_integrity_terminator.new()
num_bytes_to_add += 192 * pvd_log_block_size
# Create the Anchor
anchor1 = udfmod.UDFAnchorVolumeStructure()
anchor1.new()
self.udf_anchors.append(anchor1)
num_bytes_to_add += pvd_log_block_size
# Create the File Set
self.udf_file_set.new()
self.udf_file_set_terminator.new()
num_bytes_to_add += 2 * pvd_log_block_size
# Create the root directory, and the 'parent' entry inside.
self.udf_root = udfmod.UDFFileEntry()
self.udf_root.new(0, 'dir', None, pvd_log_block_size)
num_bytes_to_add += pvd_log_block_size
parent = udfmod.UDFFileIdentifierDescriptor()
parent.new(True, True, b'', None)
num_new_extents = self.udf_root.add_file_ident_desc(parent, pvd_log_block_size)
num_bytes_to_add += num_new_extents * pvd_log_block_size
num_partition_bytes_to_add = 0
# Create the PTR, and add the 4 extents that comprise of the LE PTR and
# BE PTR to the number of bytes to add.
ptr = path_table_record.PathTableRecord()
ptr.new_root()
self.pvd.root_directory_record().set_ptr(ptr)
num_partition_bytes_to_add += 4 * pvd_log_block_size
# Also add one extent to the size for the root directory record.
num_partition_bytes_to_add += pvd_log_block_size
self._create_dot(self.pvd, self.pvd.root_directory_record(),
self.rock_ridge, self.xa, 0o040555)
self._create_dotdot(self.pvd, self.pvd.root_directory_record(),
self.rock_ridge, False, self.xa, 0o040555)
if self.joliet_vd is not None:
# Create the PTR, and add the 4 extents that comprise of the LE PTR and
# BE PTR to the number of bytes to add.
ptr = path_table_record.PathTableRecord()
ptr.new_root()
self.joliet_vd.root_directory_record().set_ptr(ptr)
num_partition_bytes_to_add += 4 * pvd_log_block_size
# Also add one extent to the size for the root directory record.
num_partition_bytes_to_add += pvd_log_block_size
self._create_dot(self.joliet_vd,
self.joliet_vd.root_directory_record(), '',
False, -1)
self._create_dotdot(self.joliet_vd,
self.joliet_vd.root_directory_record(), '',
False, False, -1)
if self.rock_ridge:
num_partition_bytes_to_add += pvd_log_block_size
if udf:
anchor2 = udfmod.UDFAnchorVolumeStructure()
anchor2.new()
self.udf_anchors.append(anchor2)
num_partition_bytes_to_add += pvd_log_block_size
self._finish_add(num_bytes_to_add, num_partition_bytes_to_add)
self._initialized = True | [] |
Please provide a description of the function:def open(self, filename):
# type: (str) -> None
'''
Open up an existing ISO for inspection and modification.
Parameters:
filename - The filename containing the ISO to open up.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInvalidInput('This object already has an ISO; either close it or create a new object')
fp = open(filename, 'r+b')
self._managing_fp = True
try:
self._open_fp(fp)
except Exception:
fp.close()
raise | [] |
Please provide a description of the function:def open_fp(self, fp):
# type: (BinaryIO) -> None
'''
Open up an existing ISO for inspection and modification. Note that the
file object passed in here must stay open for the lifetime of this
object, as the PyCdlib class uses it internally to do writing and reading
operations. If you want PyCdlib to manage this for you, use 'open'
instead.
Parameters:
fp - The file object containing the ISO to open up.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInvalidInput('This object already has an ISO; either close it or create a new object')
self._open_fp(fp) | [] |
Please provide a description of the function:def get_file_from_iso(self, local_path, **kwargs):
# type: (str, Any) -> None
'''
A method to fetch a single file from the ISO and write it out
to a local file.
Parameters:
local_path - The local file to write to.
blocksize - The number of bytes in each transfer.
iso_path - The absolute ISO9660 path to lookup on the ISO (exclusive
with rr_path, joliet_path, and udf_path).
rr_path - The absolute Rock Ridge path to lookup on the ISO (exclusive
with iso_path, joliet_path, and udf_path).
joliet_path - The absolute Joliet path to lookup on the ISO (exclusive
with iso_path, rr_path, and udf_path).
udf_path - The absolute UDF path to lookup on the ISO (exclusive with
iso_path, rr_path, and joliet_path).
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')
blocksize = 8192
joliet_path = None
iso_path = None
rr_path = None
udf_path = None
num_paths = 0
for key in kwargs:
if key == 'blocksize':
blocksize = kwargs[key]
elif key == 'iso_path' and kwargs[key] is not None:
iso_path = utils.normpath(kwargs[key])
num_paths += 1
elif key == 'rr_path' and kwargs[key] is not None:
rr_path = utils.normpath(kwargs[key])
num_paths += 1
elif key == 'joliet_path' and kwargs[key] is not None:
joliet_path = utils.normpath(kwargs[key])
num_paths += 1
elif key == 'udf_path' and kwargs[key] is not None:
udf_path = utils.normpath(kwargs[key])
num_paths += 1
else:
raise pycdlibexception.PyCdlibInvalidInput('Unknown keyword %s' % (key))
if num_paths != 1:
raise pycdlibexception.PyCdlibInvalidInput("Exactly one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path' must be passed")
with open(local_path, 'wb') as fp:
if udf_path is not None:
self._udf_get_file_from_iso_fp(fp, blocksize, udf_path)
else:
self._get_file_from_iso_fp(fp, blocksize, iso_path, rr_path, joliet_path) | [] |
Please provide a description of the function:def get_file_from_iso_fp(self, outfp, **kwargs):
# type: (BinaryIO, Any) -> None
'''
A method to fetch a single file from the ISO and write it out
to the file object.
Parameters:
outfp - The file object to write data to.
blocksize - The number of bytes in each transfer.
iso_path - The absolute ISO9660 path to lookup on the ISO (exclusive
with rr_path, joliet_path, and udf_path).
rr_path - The absolute Rock Ridge path to lookup on the ISO (exclusive
with iso_path, joliet_path, and udf_path).
joliet_path - The absolute Joliet path to lookup on the ISO (exclusive
with iso_path, rr_path, and udf_path).
udf_path - The absolute UDF path to lookup on the ISO (exclusive with
iso_path, rr_path, and joliet_path).
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')
blocksize = 8192
joliet_path = None
iso_path = None
rr_path = None
udf_path = None
num_paths = 0
for key in kwargs:
if key == 'blocksize':
blocksize = kwargs[key]
elif key == 'iso_path' and kwargs[key] is not None:
iso_path = utils.normpath(kwargs[key])
num_paths += 1
elif key == 'rr_path' and kwargs[key] is not None:
rr_path = utils.normpath(kwargs[key])
num_paths += 1
elif key == 'joliet_path' and kwargs[key] is not None:
joliet_path = utils.normpath(kwargs[key])
num_paths += 1
elif key == 'udf_path' and kwargs[key] is not None:
udf_path = utils.normpath(kwargs[key])
num_paths += 1
else:
raise pycdlibexception.PyCdlibInvalidInput('Unknown keyword %s' % (key))
if num_paths != 1:
raise pycdlibexception.PyCdlibInvalidInput("Exactly one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path' must be passed")
if udf_path is not None:
self._udf_get_file_from_iso_fp(outfp, blocksize, udf_path)
else:
self._get_file_from_iso_fp(outfp, blocksize, iso_path, rr_path, joliet_path) | [] |
Please provide a description of the function:def get_and_write(self, iso_path, local_path, blocksize=8192):
# type: (str, str, int) -> None
'''
(deprecated) Fetch a single file from the ISO and write it out to the
specified file. Note that this will overwrite the contents of the local
file if it already exists. Also note that 'iso_path' must be an
absolute path to the file. Finally, the 'iso_path' can be an ISO9660
path, a Rock Ridge path, or a Joliet path. In the case of ambiguity,
the Joliet path is tried first, followed by the ISO9660 path, followed
by the Rock Ridge path. It is recommended to use the get_file_from_iso
API instead to resolve this ambiguity.
Parameters:
iso_path - The absolute path to the file to get data from.
local_path - The local filename to write the contents to.
blocksize - The blocksize to use when copying data; the default is 8192.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')
with open(local_path, 'wb') as fp:
self._get_and_write_fp(utils.normpath(iso_path), fp, blocksize) | [] |
Please provide a description of the function:def get_and_write_fp(self, iso_path, outfp, blocksize=8192):
# type: (str, BinaryIO, int) -> None
'''
(deprecated) Fetch a single file from the ISO and write it out to the
file object. Note that 'iso_path' must be an absolute path to the file.
Also note that the 'iso_path' can be an ISO9660 path, a Rock Ridge path,
or a Joliet path. In the case of ambiguity, the Joliet path is tried
first, followed by the ISO9660 path, followed by the Rock Ridge path.
It is recommend to use the get_file_from_iso_fp API instead to resolve
this ambiguity.
Parameters:
iso_path - The absolute path to the file to get data from.
outfp - The file object to write data to.
blocksize - The blocksize to use when copying data; the default is 8192.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')
self._get_and_write_fp(utils.normpath(iso_path), outfp, blocksize) | [] |
Please provide a description of the function:def write(self, filename, blocksize=32768, progress_cb=None, progress_opaque=None):
# type: (str, int, Optional[Callable[[int, int, Any], None]], Optional[Any]) -> None
'''
Write a properly formatted ISO out to the filename passed in. This
also goes by the name of 'mastering'.
Parameters:
filename - The filename to write the data to.
blocksize - The blocksize to use when copying data; set to 32768 by default.
progress_cb - If not None, a function to call as the write call does its
work. The callback function must have a signature of:
def func(done, total, opaque).
progress_opaque - User data to be passed to the progress callback.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')
with open(filename, 'wb') as fp:
self._write_fp(fp, blocksize, progress_cb, progress_opaque) | [] |
Please provide a description of the function:def write_fp(self, outfp, blocksize=32768, progress_cb=None, progress_opaque=None):
# type: (BinaryIO, int, Optional[Callable[[int, int, Any], None]], Optional[Any]) -> None
'''
Write a properly formatted ISO out to the file object passed in. This
also goes by the name of 'mastering'.
Parameters:
outfp - The file object to write the data to.
blocksize - The blocksize to use when copying data; set to 32768 by default.
progress_cb - If not None, a function to call as the write call does its
work. The callback function must have a signature of:
def func(done, total, opaque).
progress_opaque - User data to be passed to the progress callback.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')
self._write_fp(outfp, blocksize, progress_cb, progress_opaque) | [] |
def add_fp(self, fp, length, iso_path, rr_name=None, joliet_path=None,
           file_mode=None, udf_path=None):
    # type: (BinaryIO, int, str, Optional[str], Optional[str], Optional[int], Optional[str]) -> None
    '''
    Add the contents of a file object to the ISO.  On a Rock Ridge ISO a
    Rock Ridge name is required; on a Joliet ISO a Joliet path is optional
    but strongly recommended.  The caller must keep 'fp' open for the
    lifetime of the PyCdlib object, since the file object is read again
    when the ISO is mastered; use 'add_file' to have PyCdlib manage the
    file object instead.

    Parameters:
     fp - The file object to use for the contents of the new file.
     length - The length of the data for the new file.
     iso_path - The ISO9660 absolute path to the file destination on the ISO.
     rr_name - The Rock Ridge name of the file destination on the ISO.
     joliet_path - The Joliet absolute path to the file destination on the ISO.
     file_mode - The POSIX file_mode to apply to this file.  This only
                 applies if this is a Rock Ridge ISO.  If this is None (the
                 default), the permissions from the original file are used.
     udf_path - The UDF name of the file destination on the ISO.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')
    if not utils.file_object_supports_binary(fp):
        raise pycdlibexception.PyCdlibInvalidInput('The fp argument must be in binary mode')
    added_bytes = self._add_fp(fp, length, False, iso_path, rr_name,
                               joliet_path, udf_path, file_mode, False)
    self._finish_add(0, added_bytes)
def add_file(self, filename, iso_path, rr_name=None, joliet_path=None,
             file_mode=None, udf_path=None):
    # FIX: the type comment previously declared filename as 'Any' and
    # joliet_path as a bare 'str'; filename is used as a filesystem path
    # (os.stat below) and joliet_path defaults to None, so both were wrong.
    # type: (str, str, Optional[str], Optional[str], Optional[int], Optional[str]) -> None
    '''
    Add a file on the local filesystem to the ISO.  If the ISO is a Rock
    Ridge one, then a Rock Ridge name must also be provided.  If the ISO
    is a Joliet one, then a Joliet path may also be provided; while it is
    optional to do so, it is highly recommended.

    Parameters:
     filename - The filename to use for the data contents for the new file.
     iso_path - The ISO9660 absolute path to the file destination on the ISO.
     rr_name - The Rock Ridge name of the file destination on the ISO.
     joliet_path - The Joliet absolute path to the file destination on the ISO.
     file_mode - The POSIX file_mode to apply to this file.  This only
                 applies if this is a Rock Ridge ISO.  If this is None (the
                 default), the permissions from the original file are used.
     udf_path - The UDF name of the file destination on the ISO.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')
    # The file length is taken from the filesystem now; the file itself is
    # opened lazily by the internals when the ISO is mastered.
    num_bytes_to_add = self._add_fp(filename, os.stat(filename).st_size,
                                    True, iso_path, rr_name, joliet_path,
                                    udf_path, file_mode, False)
    self._finish_add(0, num_bytes_to_add)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.