desc
stringlengths 3
26.7k
| decl
stringlengths 11
7.89k
| bodies
stringlengths 8
553k
|
---|---|---|
@property
def csv():
    """A CSV representation of the :class:`Dataset` object.

    The top row contains the headers if they have been set; otherwise it
    is the first row of the dataset.  Assigning to :class:`Dataset.csv`
    imports data (headers are assumed to exist)::

        data = tablib.Dataset()
        data.csv = 'age, first_name, last_name\\n90, John, Adams'

    .. admonition:: Binary Warning

        :class:`Dataset.csv` uses \\r\\n line endings by default, so write
        exports in binary mode (``open('output.csv', 'wb')``) or the file
        will open in Excel on Windows with a blank line between rows.
    """
    # NOTE(review): stub body in this dump — presumably the real accessor
    # is generated by the format registry; confirm against the full source.
    pass
|
@property
def tsv():
    """A TSV representation of the :class:`Dataset` object.

    The top row contains the headers if set, otherwise the first data row.
    Assigning to :class:`Dataset.tsv` imports data (headers assumed)::

        data = tablib.Dataset()
        data.tsv = 'age\\tfirst_name\\tlast_name\\n90\\tJohn\\tAdams'
    """
    # NOTE(review): stub body in this dump — real accessor generated elsewhere.
    pass
|
@property
def yaml():
    """A YAML representation of the :class:`Dataset` object.

    With headers set, a YAML list of objects is returned; without headers,
    a YAML list of lists (rows).  Assigning to :class:`Dataset.yaml`
    imports data (headers assumed)::

        data = tablib.Dataset()
        data.yaml = '- {age: 90, first_name: John, last_name: Adams}'
    """
    # NOTE(review): stub body in this dump — real accessor generated elsewhere.
    pass
|
@property
def json():
    """A JSON representation of the :class:`Dataset` object.

    With headers set, a JSON list of objects is returned; without headers,
    a JSON list of lists (rows).  Assigning to :class:`Dataset.json`
    imports data (headers assumed)::

        data = tablib.Dataset()
        data.json = '[{"age": 90, "first_name": "John", "last_name": "Adams"}]'
    """
    # NOTE(review): stub body in this dump — real accessor generated elsewhere.
    pass
|
@property
def html():
    """An HTML table representation of the :class:`Dataset` object.

    Headers, when set, are used as the table headers.

    .. note:: Export only — assigning to this attribute is unsupported.
    """
    # NOTE(review): stub body in this dump — real accessor generated elsewhere.
    pass
|
@property
def dbf():
    """A dBASE representation of the :class:`Dataset` object.

    Assigning to :class:`Dataset.dbf` imports data::

        # from an existing DBF file:
        data = tablib.Dataset()
        data.dbf = open('existing_table.dbf').read()

    .. admonition:: Binary Warning

        :class:`Dataset.dbf` contains binary data — write exports in
        binary mode (``open('output.dbf', 'wb')``).
    """
    # NOTE(review): stub body in this dump — real accessor generated elsewhere.
    pass
|
@property
def latex():
    """A LaTeX booktabs representation of the :class:`Dataset` object.

    A set title is exported as the table caption.

    .. note:: Export only — assigning to this attribute is unsupported.
    """
    # NOTE(review): stub body in this dump — real accessor generated elsewhere.
    pass
|
def insert(self, index, row, tags=None):
    """Insert *row* into the :class:`Dataset` at *index*.

    Rows inserted must be the correct size (height or width).

    :param index: position at which the row is inserted.
    :param row: sequence of values matching the dataset width.
    :param tags: optional list of tags attached to the row.
    """
    # A mutable default (``tags=list()``) is shared between calls; use
    # None as the sentinel instead.
    if tags is None:
        tags = []
    self._validate(row)
    self._data.insert(index, Row(row, tags=tags))
|
def rpush(self, row, tags=None):
    """Add *row* to the end of the :class:`Dataset`.

    See :class:`Dataset.insert` for additional documentation.
    """
    # None replaces the shared mutable default (``tags=list()``);
    # normalize before forwarding.
    self.insert(self.height, row=row, tags=tags if tags is not None else [])
|
def lpush(self, row, tags=None):
    """Add *row* to the top of the :class:`Dataset`.

    See :class:`Dataset.insert` for additional documentation.
    """
    # None replaces the shared mutable default (``tags=list()``).
    self.insert(0, row=row, tags=tags if tags is not None else [])
|
def append(self, row, tags=None):
    """Add *row* to the end of the :class:`Dataset`.

    See :class:`Dataset.insert` for additional documentation.
    """
    # None replaces the shared mutable default (``tags=list()``).
    self.rpush(row, tags if tags is not None else [])
|
def extend(self, rows, tags=None):
    """Add each row in *rows* to the :class:`Dataset` via append().

    :param rows: iterable of row sequences.
    :param tags: optional list of tags applied to every appended row.
    """
    # None replaces the shared mutable default (``tags=list()``).
    if tags is None:
        tags = []
    for row in rows:
        self.append(row, tags)
|
def lpop(self):
    """Remove and return the first row of the :class:`Dataset`."""
    first_row = self[0]
    del self[0]
    return first_row
|
def rpop(self):
    """Remove and return the last row of the :class:`Dataset`."""
    last_row = self[-1]
    del self[-1]
    return last_row
|
def pop(self):
    """Remove and return the last row of the :class:`Dataset`.

    Alias for :class:`Dataset.rpop`.
    """
    return self.rpop()
|
def insert_col(self, index, col=None, header=None):
    """Insert a column into the :class:`Dataset` at *index*.

    Columns inserted must be the correct height.  *col* may also be a
    callable, in which case the column is built from the callable's return
    value for each existing row (see :ref:`dyncols`)::

        data.append_col(col=random.randint)

    :param index: column position.
    :param col: sequence of cell values, or a callable of one row.
    :param header: required when headers are set; becomes the column header.
    :raises HeadersNeeded: headers are set but *header* was not given.
    :raises InvalidDimensions: column does not fit the dataset shape.
    """
    if (col is None):
        col = []
    # Dynamic column: call *col* once per existing row.
    if hasattr(col, '__call__'):
        col = list(map(col, self._data))
    col = self._clean_col(col)
    self._validate(col=col)
    if self.headers:
        if (not header):
            raise HeadersNeeded()
        # NOTE(review): rejects inserting a non-empty column with a header
        # into a dataset that has headers but no rows — confirm intent.
        elif (header and (self.height == 0) and len(col)):
            raise InvalidDimensions
        self.headers.insert(index, header)
    if (self.height and self.width):
        # Existing grid: splice one cell into every row.
        for (i, row) in enumerate(self._data):
            row.insert(index, col[i])
            self._data[i] = row
    else:
        # Empty dataset: the column becomes the data, one cell per row.
        self._data = [Row([row]) for row in col]
|
def rpush_col(self, col, header=None):
    """Add *col* as the right-most column of the :class:`Dataset`.

    See :class:`Dataset.insert_col` for additional documentation.
    """
    self.insert_col(self.width, col, header=header)
|
def lpush_col(self, col, header=None):
    """Add *col* as the left-most column of the :class:`Dataset`.

    See :class:`Dataset.insert_col` for additional documentation.
    """
    self.insert_col(0, col, header=header)
|
def insert_separator(self, index, text='-'):
    """Record a separator row for the :class:`Dataset` at *index*."""
    self._separators.append((index, text))
|
def append_separator(self, text='-'):
    """Append a :ref:`separator <separators>` to the :class:`Dataset`."""
    # Separator indices are offset by one when a header row is present.
    if self.headers:
        index = (self.height + 1) if self.height else 1
    else:
        index = self.height if self.height else 0
    self.insert_separator(index, text)
|
def append_col(self, col, header=None):
    """Add *col* to the right side of the :class:`Dataset`.

    See :class:`Dataset.insert_col` for additional documentation.
    """
    self.rpush_col(col, header)
|
def get_col(self, index):
    """Return column *index* of the :class:`Dataset` as a list of cells."""
    column = []
    for row in self._data:
        column.append(row[index])
    return column
|
def add_formatter(self, col, handler):
    """Add a :ref:`formatter` to the :class:`Dataset`.

    .. versionadded:: 0.9.5

    :param col: column to format; accepts an index ``int`` or header ``str``.
    :param handler: callback executed against each cell value of the column.
    :raises KeyError: if *col* names a header that does not exist.
    :raises InvalidDatasetIndex: if *col* is an index beyond the width.
    :returns: True on success.
    """
    if isinstance(col, str):
        try:
            col = self.headers.index(col)
        except ValueError:
            # Name the missing column — the original bare ``raise KeyError``
            # gave the caller no hint which header was absent.
            raise KeyError(col)
    if col > self.width:
        raise InvalidDatasetIndex
    self._formatters.append((col, handler))
    return True
|
def filter(self, tag):
    """Return a new :class:`Dataset` containing only rows tagged *tag*."""
    filtered = copy(self)
    kept_rows = []
    for row in filtered._data:
        if row.has_tag(tag):
            kept_rows.append(row)
    filtered._data = kept_rows
    return filtered
|
def sort(self, col, reverse=False):
    """Return a new :class:`Dataset` sorted by *col*.

    :param col: header ``str`` or column index ``int``.
    :param reverse: sort descending when True.
    :raises HeadersNeeded: when sorting by name without headers set.
    """
    # BUGFIX: the original also tested ``isinstance(col, unicode)``;
    # `unicode` does not exist on Python 3 and raised NameError here.
    if isinstance(col, str):
        if not self.headers:
            raise HeadersNeeded
        key_name = col
    else:
        # Numeric index: map to the header name when headers exist,
        # otherwise sort rows positionally.
        key_name = self.headers[col] if self.headers else col
    _sorted = sorted(self.dict, key=itemgetter(key_name), reverse=reverse)
    _dset = Dataset(headers=self.headers, title=self.title)
    for item in _sorted:
        if self.headers:
            _dset.append(row=[item[key] for key in self.headers])
        else:
            _dset.append(row=item)
    return _dset
|
def transpose(self):
    """Transpose the :class:`Dataset`, returning a new instance.

    Rows become columns and vice versa; the first column of the original
    becomes the new header row.  Returns None on an empty dataset.
    """
    if (not self):
        return
    _dset = Dataset()
    # NOTE(review): assumes headers are set — ``self.headers[0]`` fails on
    # a headerless dataset; confirm callers guarantee headers here.
    new_headers = ([self.headers[0]] + self[self.headers[0]])
    _dset.headers = new_headers
    for (index, column) in enumerate(self.headers):
        # The first (pivot) column is consumed by the header row above.
        if (column == self.headers[0]):
            continue
        row_data = ([column] + self.get_col(index))
        row_data = Row(row_data)
        _dset.append(row=row_data)
    return _dset
|
def stack(self, other):
    """Row-wise concatenation: return a new ``Dataset`` with *other*'s
    rows appended below this dataset's rows.

    :raises InvalidDimensions: when the widths differ.
    """
    if not isinstance(other, Dataset):
        return
    if self.width != other.width:
        raise InvalidDimensions
    combined = copy(self)
    combined._data = list(self._data) + list(other._data)
    return combined
|
def stack_cols(self, other):
    """Column-wise concatenation: return a new ``Dataset`` with *other*'s
    columns appended to the right of this dataset's columns.

    If either ``Dataset`` has headers set, both must.

    :raises HeadersNeeded: one dataset has headers and the other does not.
    :raises InvalidDimensions: when the heights differ.
    """
    if not isinstance(other, Dataset):
        return
    if self.headers or other.headers:
        if not self.headers or not other.headers:
            raise HeadersNeeded
    if self.height != other.height:
        raise InvalidDimensions
    _dset = Dataset()
    if self.headers is None:
        # BUGFIX: the original iterated ``self.headers`` even when it was
        # None (TypeError); stack positionally when no headers are set.
        new_headers = None
        for idx in range(self.width):
            _dset.append_col(col=self.get_col(idx))
        for idx in range(other.width):
            _dset.append_col(col=other.get_col(idx))
    else:
        new_headers = self.headers + other.headers
        for column in self.headers:
            _dset.append_col(col=self[column])
        for column in other.headers:
            _dset.append_col(col=other[column])
    _dset.headers = new_headers
    return _dset
|
def remove_duplicates(self):
    """Remove duplicate rows in place, keeping the first occurrence of
    each and preserving the original order."""
    seen = set()
    unique_rows = []
    for row in self._data:
        key = tuple(row)
        if key not in seen:
            seen.add(key)
            unique_rows.append(row)
    self._data[:] = unique_rows
|
def wipe(self):
    """Erase all rows and headers from the :class:`Dataset`."""
    self._data = []
    self.__headers = None
|
def subset(self, rows=None, cols=None):
    """Return a new :class:`Dataset` restricted to the given rows/columns.

    :param rows: iterable of row indices; defaults to all rows.
    :param cols: iterable of header names; defaults to all columns.
    :raises KeyError: if a requested header is missing (defensive; the
        filter above should already have removed unknown names).
    """
    if (not self):
        return
    if (rows is None):
        rows = list(range(self.height))
    if (cols is None):
        cols = list(self.headers)
    # Silently drop out-of-range rows and unknown headers.
    rows = [row for row in rows if (row in range(self.height))]
    cols = [header for header in cols if (header in self.headers)]
    _dset = Dataset()
    _dset.headers = list(cols)
    _dset._data = []
    for (row_no, row) in enumerate(self._data):
        data_row = []
        for key in _dset.headers:
            if (key in self.headers):
                pos = self.headers.index(key)
                data_row.append(row[pos])
            else:
                raise KeyError
        if (row_no in rows):
            _dset.append(row=Row(data_row))
    return _dset
|
def wipe(self):
    """Drop every :class:`Dataset` from the :class:`Databook`."""
    self._datasets = list()
|
@classmethod
def _register_formats(cls):
    """Attach every available format as a property on the class and record
    its (export, import) hooks in ``cls._formats``."""
    for fmt in formats.available:
        try:
            try:
                # Format supports both export and import.
                setattr(cls, fmt.title, property(fmt.export_book, fmt.import_book))
                cls._formats[fmt.title] = (fmt.export_book, fmt.import_book)
            except AttributeError:
                # Export-only format (no import_book attribute).
                setattr(cls, fmt.title, property(fmt.export_book))
                cls._formats[fmt.title] = (fmt.export_book, None)
        except AttributeError:
            # Format defines neither hook; register as unsupported.
            cls._formats[fmt.title] = (None, None)
|
def add_sheet(self, dataset):
    """Append *dataset* to the :class:`Databook`.

    :raises InvalidDatasetType: when *dataset* is not a :class:`Dataset`.
    """
    if not isinstance(dataset, Dataset):
        raise InvalidDatasetType
    self._datasets.append(dataset)
|
def _package(self, ordered=True):
    """Package the :class:`Databook` as a list of title/data dicts.

    :param ordered: use :class:`OrderedDict` instead of plain ``dict``.
    """
    dict_pack = OrderedDict if ordered else dict
    return [
        dict_pack(title=dset.title, data=dset._package(ordered=ordered))
        for dset in self._datasets
    ]
|
@property
def size(self):
    """The number of :class:`Dataset` objects within the :class:`Databook`."""
    return len(self._datasets)
|
def load(self, format, in_stream, **kwargs):
    """Import *in_stream* into the :class:`Databook` using *format*.

    :param format: format name; auto-detected from the stream when falsy.
    :param \\*\\*kwargs: passed through to the format's ``import_book``.
    :raises UnsupportedFormat: when the format has no importer.
    :returns: self, for chaining.
    """
    if not format:
        format = detect_format(in_stream)
    import_book = self._formats.get(format, (None, None))[1]
    if not import_book:
        raise UnsupportedFormat('Format {0} cannot be loaded.'.format(format))
    import_book(self, in_stream, **kwargs)
    return self
|
def export(self, format, **kwargs):
    """Export the :class:`Databook` in *format*.

    :param \\*\\*kwargs: passed through to the format's ``export_book``.
    :raises UnsupportedFormat: when the format has no exporter.
    """
    export_book = self._formats.get(format, (None, None))[0]
    if not export_book:
        raise UnsupportedFormat('Format {0} cannot be exported.'.format(format))
    return export_book(self, **kwargs)
|
def render(self, tag, single, between, kwargs):
    """Render the element and append it to the parent's content.

    :param tag: element name.
    :param single: True for one-tag elements (renders ``<tag />``).
    :param between: text between opening/closing tags, or None.
    :param kwargs: attribute name/value pairs; a None value emits a bare
        attribute name.
    :returns: the rendered string when there is no parent document.
    """
    out = ('<%s' % tag)
    for (key, value) in kwargs.items():
        if (value is not None):
            # Trailing underscores let callers pass reserved words
            # (e.g. ``class_``); strip them for output.
            key = key.strip('_')
            # '-' is illegal in keyword names, so these two HTML
            # attributes are special-cased.
            if (key == 'http_equiv'):
                key = 'http-equiv'
            elif (key == 'accept_charset'):
                key = 'accept-charset'
            out = ('%s %s="%s"' % (out, key, escape(value)))
        else:
            # Valueless (boolean) attribute.
            out = ('%s %s' % (out, key))
    if (between is not None):
        out = ('%s>%s</%s>' % (out, between, tag))
    elif single:
        out = ('%s />' % out)
    else:
        out = ('%s>' % out)
    if (self.parent is not None):
        self.parent.content.append(out)
    else:
        return out
|
def close(self):
    """Append this element's closing tag to the document.

    :raises ClosingError: when the element has an opening tag only.
    :raises DeprecationError: for deprecated tags in strict mode.
    """
    doc = self.parent
    if self.tag in doc.twotags:
        doc.content.append('</%s>' % self.tag)
        return
    if self.tag in doc.onetags:
        raise ClosingError(self.tag)
    if doc.mode == 'strict_html' and self.tag in doc.deptags:
        raise DeprecationError(self.tag)
|
def open(self, **kwargs):
    """Append this element's opening tag (with attributes) to the document.

    :raises DeprecationError: for deprecated tags in strict mode.
    """
    if (self.tag in self.parent.twotags) or (self.tag in self.parent.onetags):
        self.render(self.tag, False, None, kwargs)
    elif (self.parent.mode == 'strict_html') and (self.tag in self.parent.deptags):
        # BUGFIX: the mode lives on the parent document (as close() reads
        # it); ``self.mode`` does not exist on elements and raised
        # AttributeError instead of DeprecationError.
        raise DeprecationError(self.tag)
|
def __init__(self, mode='strict_html', case='lower', onetags=None, twotags=None, separator='\n', class_=None):
    """Configure whole-document behaviour.

    :param mode: 'strict_html' (default, 'html' is an alias) for HTML 4.01,
        'loose_html' to allow some deprecated elements, or 'xml' for
        arbitrary elements.
    :param case: 'lower' (default) or 'upper' element-name printing.
    :param onetags: valid opening-tag-only elements ('xml' mode only).
    :param twotags: valid open+close elements ('xml' mode only).
    :param separator: string placed between added elements.
    :param class_: a class added to every element when defined.
    :raises CustomizationError: only one of onetags/twotags given in xml mode.
    :raises ModeError: unknown *mode*.
    """
    valid_onetags = ['AREA', 'BASE', 'BR', 'COL', 'FRAME', 'HR', 'IMG', 'INPUT', 'LINK', 'META', 'PARAM']
    valid_twotags = ['A', 'ABBR', 'ACRONYM', 'ADDRESS', 'B', 'BDO', 'BIG', 'BLOCKQUOTE', 'BODY', 'BUTTON', 'CAPTION', 'CITE', 'CODE', 'COLGROUP', 'DD', 'DEL', 'DFN', 'DIV', 'DL', 'DT', 'EM', 'FIELDSET', 'FORM', 'FRAMESET', 'H1', 'H2', 'H3', 'H4', 'H5', 'H6', 'HEAD', 'HTML', 'I', 'IFRAME', 'INS', 'KBD', 'LABEL', 'LEGEND', 'LI', 'MAP', 'NOFRAMES', 'NOSCRIPT', 'OBJECT', 'OL', 'OPTGROUP', 'OPTION', 'P', 'PRE', 'Q', 'SAMP', 'SCRIPT', 'SELECT', 'SMALL', 'SPAN', 'STRONG', 'STYLE', 'SUB', 'SUP', 'TABLE', 'TBODY', 'TD', 'TEXTAREA', 'TFOOT', 'TH', 'THEAD', 'TITLE', 'TR', 'TT', 'UL', 'VAR']
    deprecated_onetags = ['BASEFONT', 'ISINDEX']
    deprecated_twotags = ['APPLET', 'CENTER', 'DIR', 'FONT', 'MENU', 'S', 'STRIKE', 'U']
    self.header = []
    self.content = []
    self.footer = []
    self.case = case
    self.separator = separator
    # Flag for whether the document is a full page (set by init()).
    self._full = False
    self.class_ = class_
    if ((mode == 'strict_html') or (mode == 'html')):
        # Tag lists are kept in both cases so membership tests succeed
        # regardless of the `case` setting.
        self.onetags = valid_onetags
        self.onetags += list(map(str.lower, self.onetags))
        self.twotags = valid_twotags
        self.twotags += list(map(str.lower, self.twotags))
        self.deptags = (deprecated_onetags + deprecated_twotags)
        self.deptags += list(map(str.lower, self.deptags))
        self.mode = 'strict_html'
    elif (mode == 'loose_html'):
        # Deprecated elements are folded into the valid sets.
        self.onetags = (valid_onetags + deprecated_onetags)
        self.onetags += list(map(str.lower, self.onetags))
        self.twotags = (valid_twotags + deprecated_twotags)
        self.twotags += list(map(str.lower, self.twotags))
        self.mode = mode
    elif (mode == 'xml'):
        if (onetags and twotags):
            self.onetags = onetags
            self.twotags = twotags
        elif ((onetags and (not twotags)) or (twotags and (not onetags))):
            raise CustomizationError()
        else:
            # russell() answers "in" for everything: arbitrary elements.
            self.onetags = russell()
            self.twotags = russell()
        self.mode = mode
    else:
        raise ModeError(mode)
|
def __call__(self, escape=False):
    """Return the document rendered as a string.

    :param escape: when True, run the output through `_escape` so ``<``
        and ``>`` display literally in a browser.
    """
    text = self.__str__()
    if escape:
        return _escape(text)
    return text
|
def add(self, text):
    """Alias for :meth:`addcontent`."""
    self.addcontent(text)
|
def addfooter(self, text):
    """Append *text* to the bottom (footer) of the document."""
    self.footer.append(text)
|
def addheader(self, text):
    """Append *text* to the top (header) of the document."""
    self.header.append(text)
|
def addcontent(self, text):
    """Append *text* to the main body of the document."""
    self.content.append(text)
|
def init(self, lang='en', css=None, metainfo=None, title=None, header=None, footer=None, charset=None, encoding=None, doctype=None, bodyattrs=None, script=None):
    """Start a complete document (doctype, <head>, <body>/xml prologue).

    Omit this method for HTML/XML snippets.

    :param lang: language code, emitted as ``<html lang='en'>`` (html only).
    :param css: stylesheet filename or list of filenames (html only).
    :param metainfo: dict of name:content pairs for <meta> elements (html only).
    :param bodyattrs: dict of attributes for the <body> element (html only).
    :param script: dict of src:type pairs rendered as <script> elements (html only).
    :param title: document title for the <title> element (html only).
    :param header: text inserted right after <body> (html only).
    :param footer: text inserted right before </body> (html only).
    :param charset: character set for the Content-Type <meta> (html only).
    :param encoding: encoding for the ``<?xml ... ?>`` prologue (xml only).
    :param doctype: document type string; sensible per-mode default.
    """
    self._full = True
    if ((self.mode == 'strict_html') or (self.mode == 'loose_html')):
        if (doctype is None):
            doctype = "<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN'>"
        self.header.append(doctype)
        self.html(lang=lang)
        self.head()
        if (charset is not None):
            self.meta(http_equiv='Content-Type', content=('text/html; charset=%s' % charset))
        if (metainfo is not None):
            self.metainfo(metainfo)
        if (css is not None):
            self.css(css)
        if (title is not None):
            self.title(title)
        if (script is not None):
            self.scripts(script)
        self.head.close()
        if (bodyattrs is not None):
            self.body(**bodyattrs)
        else:
            self.body()
        if (header is not None):
            self.content.append(header)
        if (footer is not None):
            self.footer.append(footer)
    elif (self.mode == 'xml'):
        if (doctype is None):
            # Include the encoding in the prologue only when given.
            if (encoding is not None):
                doctype = ("<?xml version='1.0' encoding='%s' ?>" % encoding)
            else:
                doctype = "<?xml version='1.0' ?>"
        self.header.append(doctype)
|
def css(self, filelist):
    """Link CSS stylesheet(s) into the document via <link> (HTML only).

    :param filelist: a filename string or an iterable of filenames.
    """
    if isinstance(filelist, str):
        filelist = [filelist]
    for filename in filelist:
        self.link(href=filename, rel='stylesheet', type='text/css', media='all')
|
def metainfo(self, mydict):
    """Add <meta name=... content=...> elements from *mydict* (HTML only).

    :raises TypeError: when *mydict* is not a dictionary.
    """
    if not isinstance(mydict, dict):
        raise TypeError('Metainfo should be called with a dictionary argument of name:content pairs.')
    for name, content in mydict.items():
        self.meta(name=name, content=content)
|
def scripts(self, mydict):
    """Add <script type='text/type' src=src></script> elements (HTML only).

    :raises TypeError: when *mydict* is not a dict of src:type pairs.
    """
    if not isinstance(mydict, dict):
        raise TypeError('Script should be given a dictionary of src:type pairs.')
    for src, type in mydict.items():
        self.script('', src=src, type=('text/%s' % type))
|
def getDbfField(self):
    """Return a `DbfFieldDef` instance built from the current definition."""
    return self.cls(self.name, self.len, self.dec)
|
def appendToHeader(self, dbfh):
    """Create the `DbfFieldDef` for this definition and add it to *dbfh*.

    :param dbfh: `DbfHeader` instance receiving the field.
    """
    dbfh.addField(self.getDbfField())
|
def add_field(self, name, typ, len, dec=0):
    """Append a field definition to this structure.

    :param name: field name (str); must not contain ASCII NULs and must
        not exceed 10 characters.
    :param typ: single-character type code from "CNLMDT" (character,
        numeric, logical, memo, date, date/time).
    :param len: field length; only meaningful for character and numeric
        fields, all others have fixed length.
        NOTE(review): shadows the builtin ``len``, but renaming would
        break keyword callers, so the name is kept.
    :param dec: decimal precision (numeric fields only).
    """
    self.fields.append(self.FieldDefinitionClass(name, typ, len, dec))
|
def write(self, filename):
    """Create an empty .DBF file at *filename* using the current structure.

    :param filename: path of the file to create (overwritten if present).
    """
    _dbfh = DbfHeader()
    _dbfh.setCurrentDate()
    for _fldDef in self.fields:
        _fldDef.appendToHeader(_dbfh)
    # A context manager guarantees the handle is closed even if the header
    # write raises; the original leaked the file object on error.
    with open(filename, 'wb') as _dbfStream:
        _dbfh.write(_dbfStream)
|
def __init__(self, name, length=None, decimalCount=None, start=None, stop=None, ignoreErrors=False):
    """Initialize a dbf field definition.

    :param name: field name (<= 10 chars, uppercased on store).
    :param length: field width; required unless the subclass fixes it.
    :param decimalCount: decimal precision; defaults to 0.
    :param start: field offset inside a record.
    :param stop: end offset inside a record (stored as ``self.end``).
    :param ignoreErrors: error-processing mode for this field.
    :raises ValueError: bad name length or non-positive width.
    """
    # NOTE(review): asserts are stripped under `python -O`; subclass
    # contract violations would then pass silently.
    assert (self.typeCode is not None), 'Type code must be overriden'
    assert (self.defaultValue is not None), 'Default value must be overriden'
    if (len(name) > 10):
        raise ValueError(('Field name "%s" is too long' % name))
    name = str(name).upper()
    # `self.__class__.length` is the subclass-level fixed width (None for
    # variable-width types such as C and N).
    if (self.__class__.length is None):
        if (length is None):
            raise ValueError(("[%s] Length isn't specified" % name))
        length = int(length)
        if (length <= 0):
            raise ValueError(('[%s] Length must be a positive integer' % name))
    else:
        length = self.length
    if (decimalCount is None):
        decimalCount = 0
    self.name = name
    self.length = length
    self.decimalCount = decimalCount
    self.ignoreErrors = ignoreErrors
    self.start = start
    self.end = stop
|
def fromString(cls, string, start, ignoreErrors=False):
    """Decode a dbf field definition from a 32-byte descriptor.

    :param string: the 32-byte field-descriptor data.
    :param start: this field's offset inside a record.
    :param ignoreErrors: initial error-processing mode for the new field.
    """
    assert (len(string) == 32)
    # NOTE(review): on Python 3 indexing bytes yields ints, so _length and
    # string[17] are ints here — confirm the constructor accepts that.
    _length = string[16]
    return cls(utils.unzfill(string)[:11].decode('utf-8'), _length, string[17], start, (start + _length), ignoreErrors=ignoreErrors)
|
def toString(self):
    """Return the encoded 32-character field definition.

    Layout: 11-char NUL-padded name, type code, 4 reserved NULs, length
    byte, decimal-count byte, 14 reserved NULs.
    """
    # The manual-padding fallback guarded by ``sys.version_info < (2, 4)``
    # is dead code: str.ljust has accepted a fill character since 2.4.
    _name = self.name.ljust(11, '\x00')
    return (
        _name
        + self.typeCode
        + '\x00' * 4
        + chr(self.length)
        + chr(self.decimalCount)
        + '\x00' * 14
    )
|
def fieldInfo(self):
    """Return a (name, type-code, length, decimal-count) tuple."""
    return (self.name, self.typeCode, self.length, self.decimalCount)
|
'Return a "raw" field value from the record string.'
| def rawFromRecord(self, record):
| return record[self.start:self.end]
|
def decodeFromRecord(self, record):
    """Return the decoded field value from the raw *record* string.

    When ``ignoreErrors`` is set, decoding failures yield
    ``utils.INVALID_VALUE`` instead of propagating.
    """
    try:
        return self.decodeValue(self.rawFromRecord(record))
    except Exception:
        # A bare `except:` also swallowed SystemExit/KeyboardInterrupt;
        # only genuine decode errors should be masked by ignoreErrors.
        if self.ignoreErrors:
            return utils.INVALID_VALUE
        raise
|
def decodeValue(self, value):
    """Return the decoded value for raw *value*.

    Called from `decodeFromRecord`; abstract — subclasses must override.
    Not part of the public API.
    """
    raise NotImplementedError
|
def encodeValue(self, value):
    """Return a str containing the encoded field *value*.

    Abstract — subclasses must override.
    """
    raise NotImplementedError
|
def decodeValue(self, value):
    """Return *value* decoded to str with trailing spaces stripped.

    Record slices arrive as bytes; on Python 3 ``bytes.rstrip`` requires
    a bytes argument — the original passed the str ``' '`` and raised
    TypeError on every call.
    """
    return value.rstrip(b' ').decode('utf-8')
|
def encodeValue(self, value):
    """Return *value* as a string truncated/space-padded to the field length."""
    text = str(value)[:self.length]
    return text.ljust(self.length)
|
def decodeValue(self, value):
    """Decode a numeric field value.

    Returns a float when a decimal point is present, an int otherwise,
    and 0 for a blank field.
    """
    stripped = value.strip(' \x00')
    if '.' in stripped:
        return float(stripped)
    if not stripped:
        return 0
    return int(stripped)
|
def encodeValue(self, value):
    """Encode *value* as a fixed-width ``'%*.*f'`` string.

    :raises ValueError: numeric overflow that truncating the fractional
        part cannot fix.
    """
    _rv = ('%*.*f' % (self.length, self.decimalCount, value))
    if (len(_rv) > self.length):
        _ppos = _rv.find('.')
        if (0 <= _ppos <= self.length):
            # Too wide: drop fractional digits until the value fits.
            _rv = _rv[:self.length]
        else:
            raise ValueError(('[%s] Numeric overflow: %s (field width: %i)' % (self.name, _rv, self.length)))
    return _rv
|
def decodeValue(self, value):
    """Unpack a little-endian signed 32-bit integer from *value*."""
    (number,) = struct.unpack('<i', value)
    return number
|
def encodeValue(self, value):
    """Pack *value* as a little-endian signed 32-bit integer."""
    return struct.pack('<i', int(value))
|
def decodeValue(self, value):
    """Decode a currency value stored as a 64-bit int scaled by 10**4."""
    (raw,) = struct.unpack('<q', value)
    return raw / 10000.0
|
def encodeValue(self, value):
    """Pack a currency *value* as a 64-bit int scaled by 10**4."""
    return struct.pack('<q', round((value * 10000)))
|
def decodeValue(self, value):
    """Decode a logical field to True, False or -1 (unknown, '?').

    :raises ValueError: on any character outside the dBASE logical set.
    """
    if value == '?':
        return -1
    # Order matters: an empty value matches both membership tests, and
    # the original resolved it to False.
    if value in 'NnFf ':
        return False
    if value in 'YyTt':
        return True
    raise ValueError(('[%s] Invalid logical value %r' % (self.name, value)))
|
def encodeValue(self, value):
    """Encode to a character from the "TF?" set.

    'T' when *value* is True, '?' when it equals -1, 'F' otherwise.
    """
    if value is True:
        return 'T'
    return '?' if value == -1 else 'F'
|
def decodeValue(self, value):
    """Return the int .dbt block number decoded from the string.

    Memo fields are not implemented.
    """
    raise NotImplementedError
|
def encodeValue(self, value):
    """Return raw data encoded from *value* (internal method).

    Memo fields are not implemented.
    """
    raise NotImplementedError
|
def decodeValue(self, value):
    """Return a ``datetime.date`` parsed via ``utils.getDate``, or None
    when the field is blank."""
    if not value.strip():
        return None
    return utils.getDate(value)
|
def encodeValue(self, value):
    """Encode *value* as a "yyyymmdd" string (via ``utils.getDate``), or
    as spaces for a blank value."""
    if not value:
        return ' ' * self.length
    return utils.getDate(value).strftime('%Y%m%d')
|
def decodeValue(self, value):
    """Decode a packed (Julian day number, milliseconds) pair into a
    ``datetime.datetime``; returns None for an unset (zero JDN) value."""
    assert len(value) == self.length
    _jdn, _msecs = struct.unpack('<2I', value)
    if _jdn < 1:
        return None
    moment = datetime.datetime.fromordinal(_jdn - self.JDN_GDN_DIFF)
    return moment + datetime.timedelta(0, _msecs / 1000.0)
|
def encodeValue(self, value):
    """Encode *value* as a packed (JDN, milliseconds) pair; a falsy value
    becomes NUL padding of the field length."""
    if not value:
        encoded = '\x00' * self.length
    else:
        value = utils.getDateTime(value)
        msecs = ((value.hour * 3600) + (value.minute * 60) + value.second) * 1000
        encoded = struct.pack('<2I', value.toordinal() + self.JDN_GDN_DIFF, msecs)
    assert len(encoded) == self.length
    return encoded
|
def __init__(self, fields=None, headerLength=0, recordLength=0, recordCount=0, signature=3, lastUpdate=None, ignoreErrors=False):
    """Initialize a DbfHeader.

    :param fields: list of field definitions (copied).
    :param recordLength: size of each record in bytes.
    :param headerLength: size of the header in bytes.
    :param recordCount: number of records stored in the DBF.
    :param signature: version number; 0x03 means "file without DBT".
    :param lastUpdate: date of the DBF's last update — accepts a string
        ('yymmdd'/'yyyymmdd'), a timestamp, a date/datetime, a
        (yyyy, mm, dd, ...) sequence, or an object with a ``ticks`` callable.
    :param ignoreErrors: error-processing mode for all DBF fields.
    """
    self.signature = signature
    if (fields is None):
        self.fields = []
    else:
        # Copy so the caller's list is never mutated by addField().
        self.fields = list(fields)
    self.lastUpdate = getDate(lastUpdate)
    self.recordLength = recordLength
    self.headerLength = headerLength
    self.recordCount = recordCount
    self.ignoreErrors = ignoreErrors
    # A header created with fields is already out of sync with disk.
    self.changed = bool(self.fields)
|
def fromString(cls, string):
    """Return a header instance parsed from *string*.

    :param string: raw header bytes, as read from a .dbf file.
    """
    # BUGFIX: the original wrapped the data in ``io.StringIO(str(string))``;
    # str() on bytes yields the "b'...'" repr and a text stream cannot feed
    # the binary parser in fromStream.  Use an in-memory bytes stream.
    return cls.fromStream(io.BytesIO(string))
|
def fromStream(cls, stream):
    """Return a header object parsed from a binary *stream*.

    Reads the fixed 32-byte prologue, then one 32-byte field descriptor
    at a time until the 0x0D terminator byte.
    """
    stream.seek(0)
    first_32 = stream.read(32)
    if not isinstance(first_32, bytes):
        # Text-mode stream: encode the prologue back to bytes.
        _data = bytes(first_32, sys.getfilesystemencoding())
    else:
        # BUGFIX: the original re-assigned ``_data = first_32``
        # unconditionally, discarding the conversion above.
        _data = first_32
    (_cnt, _hdrLen, _recLen) = struct.unpack('<I2H', _data[4:12])
    # Two-digit year in the prologue: < 80 means 20xx, otherwise 19xx.
    _year = _data[1]
    if _year < 80:
        _year += 2000
    else:
        _year += 1900
    _obj = cls(None, _hdrLen, _recLen, _cnt, _data[0], (_year, _data[2], _data[3]))
    _pos = 1
    _data = stream.read(1)
    # BUGFIX: compare against the bytes terminator — on Python 3 the str
    # '\r' never equals a bytes read, so the loop ran off the end of the
    # field-descriptor area.
    while _data != b'\r':
        _data += stream.read(31)
        # NOTE(review): _data[11] is an int on Python 3 — confirm
        # fields.lookupFor accepts the numeric type code.
        _fld = fields.lookupFor(_data[11]).fromString(_data, _pos)
        _obj._addField(_fld)
        _pos = _fld.end
        _data = stream.read(1)
    return _obj
|
def ignoreErrors(self, value):
    """Propagate the `ignoreErrors` flag to this header and all fields."""
    value = bool(value)
    self._ignore_errors = value
    for _field in self.fields:
        _field.ignoreErrors = value
|
def _addField(self, *defs):
    """Internal variant of `addField` — does not set ``self.changed`` and
    does not touch ``recordLength``/``headerLength``.

    :param defs: `DbfFieldDef` instances or (name, type, len, dec) tuples.
    :returns: combined length of the appended field records.
    """
    _defs = []
    _recordLength = 0
    for _def in defs:
        if isinstance(_def, fields.DbfFieldDef):
            _obj = _def
        else:
            # Tuple form: pad missing entries with None up to 4 slots.
            (_name, _type, _len, _dec) = (tuple(_def) + ((None,) * 4))[:4]
            _cls = fields.lookupFor(_type)
            _obj = _cls(_name, _len, _dec, ignoreErrors=self._ignore_errors)
        _recordLength += _obj.length
        _defs.append(_obj)
    self.fields += _defs
    return _recordLength
|
def addField(self, *defs):
    """Add field definition(s) to the header, updating the record and
    header lengths.

    Examples::

        dbfh.addField(("name", "C", 20), dbf.DbfDateFieldDef("birthdate"))
        dbfh.addField(("price", "N", 5, 2))
    """
    was_empty = not self.recordLength
    self.recordLength += self._addField(*defs)
    if was_empty:
        # The extra byte holds the per-record deletion flag.
        self.recordLength += 1
    self.headerLength = 32 + (32 * len(self.fields)) + 1
    self.changed = True
|
def write(self, stream):
    """Encode the header and field descriptors and write them to ``stream``.

    ``stream`` must be opened in binary mode: the fixed header and the
    terminator are bytes, and the descriptors are explicitly encoded.
    Clears ``self.changed`` on success.
    """
    stream.seek(0)
    stream.write(self.toString())
    descriptors = [fld.toString() for fld in self.fields]
    stream.write(''.join(descriptors).encode(sys.getfilesystemencoding()))
    # 0x0D terminates the field descriptor array (dBASE format).
    # Must be bytes — the original wrote the str '\r', which fails on
    # the binary stream the surrounding writes require.
    stream.write(b'\r')
    self.changed = False
|
def toString(self):
    """Return the 32-byte encoded fixed header as ``bytes``.

    Layout: signature, last-update date (year offset from 1900,
    month, day), record count, header length, record length, then
    20 reserved NUL bytes.
    """
    packed = struct.pack(
        '<4BI2H',
        self.signature,
        self.year - 1900,
        self.month,
        self.day,
        self.recordCount,
        self.headerLength,
        self.recordLength,
    )
    # Reserved area must be bytes: the original appended the str
    # '\x00' * 20 to a bytes object, raising TypeError on every call.
    return packed + b'\x00' * 20
|
def setCurrentDate(self):
    """Stamp ``self.lastUpdate`` with today's date."""
    today = datetime.date.today()
    self.lastUpdate = today
|
def __getitem__(self, item):
    """Return a field definition by numeric index or name string.

    Name lookup is case-insensitive (field names are stored upper-case).

    Raises:
        KeyError: when a name is given and no field matches.
    """
    if not isinstance(item, str):
        return self.fields[item]
    wanted = item.upper()
    for fld in self.fields:
        if fld.name == wanted:
            return fld
    raise KeyError(item)
|
def __init__(self, f, readOnly=False, new=False, ignoreErrors=False):
    """Open (or create) a DBF table.

    Arguments:
        f: filename or a file-like object.
        readOnly: when ``f`` is a filename and ``new`` is false, open
            the file read-only; ignored in all other cases.
        new: create a fresh, empty table instead of reading an
            existing one from ``f``.
        ignoreErrors: when set, failing field value conversion returns
            ``INVALID_VALUE`` instead of raising a conversion error.
    """
    if isinstance(f, str):
        self.name = f
        if new:
            mode = 'w+b'
        else:
            mode = 'rb' if readOnly else 'r+b'
        self.stream = open(f, mode)
    else:
        self.name = getattr(f, 'name', '')
        self.stream = f
    if new:
        self.header = self.HeaderClass()
    else:
        self.header = self.HeaderClass.fromStream(self.stream)
    self.ignoreErrors = ignoreErrors
    self._new = bool(new)
    self._changed = False
|
def ignoreErrors(self, value):
    """Propagate the ``ignoreErrors`` flag to self and the header."""
    flag = bool(value)
    self._ignore_errors = flag
    self.header.ignoreErrors = flag
|
def _fixIndex(self, index):
    """Normalize ``index`` to a valid record position.

    Negative values are treated like negative list indexes
    (``-1`` is the last record).

    Raises:
        TypeError: if ``index`` is not an int.
        IndexError: if the (normalized) index is out of range.
    """
    if not isinstance(index, int):
        raise TypeError('Index must be a numeric object')
    if index < 0:
        # List-style negative indexing.  The original added
        # ``len(self) + 1``, which pushed every negative index past
        # the last record and made it raise IndexError, contradicting
        # the documented behavior.
        index += len(self)
    if index >= len(self):
        raise IndexError('Record index out of range')
    return index
|
def flush(self):
    """Write pending header changes to the associated stream."""
    if not self.changed:
        return
    self.header.setCurrentDate()
    self.header.write(self.stream)
    self.stream.flush()
    self._changed = False
|
def indexOfFieldName(self, name):
    """Return the position of the field named ``name``.

    Lookup is case-insensitive; raises ValueError when absent.
    """
    wanted = name.upper()
    return [fld.name for fld in self.header.fields].index(wanted)
|
def newRecord(self):
    """Create an empty record bound to this table."""
    factory = self.RecordClass
    return factory(self)
|
def append(self, record):
    """Write ``record`` at the end of the table and bump the record count."""
    count = self.header.recordCount
    record.index = count
    record._write()
    self.header.recordCount = count + 1
    self._new = False
    self._changed = True
|
def addField(self, *defs):
    """Forward field definitions to the header.

    Only legal on a brand-new table; see `header.DbfHeader.addField`
    for the definition formats.

    Raises:
        TypeError: if records were already added.
    """
    if not self._new:
        raise TypeError("At least one record was added, structure can't be changed")
    self.header.addField(*defs)
|
def __len__(self):
    """Return the number of records stored in the table."""
    count = self.recordCount
    return count
|
def __getitem__(self, index):
    """Return the `DbfRecord` stored at ``index``."""
    position = self._fixIndex(index)
    return self.RecordClass.fromStream(self, position)
|
def __setitem__(self, index, record):
    """Write the `DbfRecord` ``record`` at position ``index``."""
    position = self._fixIndex(index)
    record.index = position
    record._write()
    self._new = False
    self._changed = True
|
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.