Code | Summary
---|---
Please provide a description of the function:def print_float(self, value, decimal_digits=2, justify_right=True):
format_string = '{{0:0.{0}F}}'.format(decimal_digits)
self.print_number_str(format_string.format(value), justify_right)
|
[
"Print a numeric value to the display. If value is negative\n it will be printed with a leading minus sign. Decimal digits is the\n desired number of digits after the decimal point.\n "
] |
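A minimal usage sketch for print_float. The SevenSegment class name and the 0x70 I2C address are assumptions based on the Adafruit LED backpack driver this code resembles:

# Hypothetical wiring; SevenSegment and address=0x70 are assumptions.
display = SevenSegment(address=0x70)
display.begin()                  # init: oscillator on, full brightness
display.print_float(3.14159)     # decimal_digits=2 -> shows " 3.14"
display.write_display()          # push the buffer to the hardware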
Please provide a description of the function:def print_hex(self, value, justify_right=True):
if value < 0 or value > 0xFFFF:
# Ignore out of range values.
return
self.print_str('{0:X}'.format(value), justify_right)
|
[
"Print a numeric value in hexadecimal. Value should be from 0 to FFFF.\n "
] |
Please provide a description of the function:def set_digit_raw(self, pos, bitmask):
if pos < 0 or pos > 3:
# Ignore out of bounds digits.
return
# Jump past the colon at position 2 by adding a conditional offset.
offset = 0 if pos < 2 else 1
# Calculate the correct position depending on orientation
if self.invert:
pos = 4-(pos+offset)
else:
pos = pos+offset
# Set the digit bitmask value at the appropriate position.
self.buffer[pos*2] = bitmask & 0xFF
|
[
"Set digit at position to raw bitmask value. Position should be a value\n of 0 to 3 with 0 being the left most digit on the display."
] |
Please provide a description of the function:def set_decimal(self, pos, decimal):
if pos < 0 or pos > 3:
# Ignore out of bounds digits.
return
# Jump past the colon at position 2 by adding a conditional offset.
offset = 0 if pos < 2 else 1
# Calculate the correct position depending on orientation
if self.invert:
pos = 4-(pos+offset)
else:
pos = pos+offset
# Set bit 7 (decimal point) based on provided value.
if decimal:
self.buffer[pos*2] |= (1 << 7)
else:
self.buffer[pos*2] &= ~(1 << 7)
|
[
"Turn decimal point on or off at provided position. Position should be\n a value 0 to 3 with 0 being the left most digit on the display. Decimal\n should be True to turn on the decimal point and False to turn it off.\n "
] |
Please provide a description of the function:def set_digit(self, pos, digit, decimal=False):
if self.invert:
self.set_digit_raw(pos, IDIGIT_VALUES.get(str(digit).upper(), 0x00))
else:
self.set_digit_raw(pos, DIGIT_VALUES.get(str(digit).upper(), 0x00))
if decimal:
self.set_decimal(pos, True)
|
[
"Set digit at position to provided value. Position should be a value\n of 0 to 3 with 0 being the left most digit on the display. Digit should\n be a number 0-9, character A-F, space (all LEDs off), or dash (-).\n "
] |
Please provide a description of the function:def set_left_colon(self, show_colon):
if show_colon:
self.buffer[4] |= 0x04
self.buffer[4] |= 0x08
else:
self.buffer[4] &= (~0x04) & 0xFF
self.buffer[4] &= (~0x08) & 0xFF
|
[
"Turn the left colon on with show color True, or off with show colon\n False. Only the large 1.2\" 7-segment display has a left colon.\n "
] |
Please provide a description of the function:def print_number_str(self, value, justify_right=True):
# Calculate length of value without decimals.
length = sum(map(lambda x: 1 if x != '.' else 0, value))
# Error if value without decimals is longer than 4 characters.
if length > 4:
self.print_number_str('----')
return
# Calculate starting position of digits based on justification.
pos = (4-length) if justify_right else 0
# Go through each character and print it on the display.
for i, ch in enumerate(value):
if ch == '.':
# Print decimal points on the previous digit.
self.set_decimal(pos-1, True)
else:
self.set_digit(pos, ch)
pos += 1
|
[
"Print a 4 character long string of numeric values to the display.\n Characters in the string should be any supported character by set_digit,\n or a decimal point. Decimal point characters will be associated with\n the previous character.\n "
] |
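A short sketch of how print_number_str handles decimal points per the docstring (a point attaches to the previous digit rather than consuming a position); the display object is the assumed instance from above:

display.print_number_str('12.34')   # digits 1 2 3 4, point lit after the 2
display.print_number_str('78', justify_right=False)  # left justified
display.print_number_str('12345')   # 5 digits: falls back to '----'
display.write_display()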
Please provide a description of the function:def print_hex(self, value, justify_right=True):
if value < 0 or value > 0xFFFF:
# Ignore out of range values.
return
self.print_number_str('{0:X}'.format(value), justify_right)
|
[
"Print a numeric value in hexadecimal. Value should be from 0 to FFFF.\n "
] |
Please provide a description of the function:def begin(self):
# Turn on the oscillator.
self._device.writeList(HT16K33_SYSTEM_SETUP | HT16K33_OSCILLATOR, [])
# Turn display on with no blinking.
self.set_blink(HT16K33_BLINK_OFF)
# Set display to full brightness.
self.set_brightness(15)
|
[
"Initialize driver with LEDs enabled and all turned off."
] |
Please provide a description of the function:def set_blink(self, frequency):
if frequency not in [HT16K33_BLINK_OFF, HT16K33_BLINK_2HZ,
HT16K33_BLINK_1HZ, HT16K33_BLINK_HALFHZ]:
raise ValueError('Frequency must be one of HT16K33_BLINK_OFF, HT16K33_BLINK_2HZ, HT16K33_BLINK_1HZ, or HT16K33_BLINK_HALFHZ.')
self._device.writeList(HT16K33_BLINK_CMD | HT16K33_BLINK_DISPLAYON | frequency, [])
|
[
"Blink display at specified frequency. Note that frequency must be a\n value allowed by the HT16K33, specifically one of: HT16K33_BLINK_OFF,\n HT16K33_BLINK_2HZ, HT16K33_BLINK_1HZ, or HT16K33_BLINK_HALFHZ.\n "
] |
Please provide a description of the function:def set_brightness(self, brightness):
if brightness < 0 or brightness > 15:
raise ValueError('Brightness must be a value of 0 to 15.')
self._device.writeList(HT16K33_CMD_BRIGHTNESS | brightness, [])
|
[
"Set brightness of entire display to specified value (16 levels, from\n 0 to 15).\n "
] |
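A sketch of the init sequence implied by begin(), set_blink, and set_brightness; the constant names are the ones used in the code above:

display.begin()                       # oscillator on, blink off, brightness 15
display.set_blink(HT16K33_BLINK_1HZ)  # must be one of the HT16K33_BLINK_* values
display.set_brightness(8)             # dim to mid level; valid range is 0-15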
Please provide a description of the function:def set_led(self, led, value):
if led < 0 or led > 127:
raise ValueError('LED must be value of 0 to 127.')
# Calculate position in byte buffer and bit offset of desired LED.
pos = led // 8
offset = led % 8
if not value:
# Turn off the specified LED (set bit to zero).
self.buffer[pos] &= ~(1 << offset)
else:
# Turn on the specified LED (set bit to one).
self.buffer[pos] |= (1 << offset)
|
[
"Sets specified LED (value of 0 to 127) to the specified value, 0/False\n for off and 1 (or any True/non-zero value) for on.\n "
] |
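The byte/bit arithmetic in set_led, spelled out on a concrete value (display is the assumed instance from above):

# LED 10: pos = 10 // 8 = 1, offset = 10 % 8 = 2
display.set_led(10, True)    # buffer[1] |= (1 << 2)
display.set_led(10, False)   # buffer[1] &= ~(1 << 2)
display.write_display()      # nothing changes on-device until this call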
Please provide a description of the function:def write_display(self):
for i, value in enumerate(self.buffer):
self._device.write8(i, value)
|
[
"Write display buffer to display hardware."
] |
Please provide a description of the function:def clear(self):
for i, value in enumerate(self.buffer):
self.buffer[i] = 0
|
[
"Clear contents of display buffer."
] |
Please provide a description of the function:def get_readonly_fields(self, request, obj=None):
if obj:
return list(self.readonly_fields) + ['id', 'identity',
'is_current']
return self.readonly_fields
|
[
"\n This is required a subclass of VersionedAdmin has readonly_fields\n ours won't be undone\n "
] |
Please provide a description of the function:def get_list_display(self, request):
# Force cast to list as super get_list_display could return a tuple
list_display = list(
super(VersionedAdmin, self).get_list_display(request))
# Prepend the following fields to list display
if self.list_display_show_identity:
list_display = ['identity_shortener', ] + list_display
# Append the following fields to list display
if self.list_display_show_start_date:
list_display += ['version_start_date', ]
if self.list_display_show_end_date:
list_display += ['version_end_date', ]
return list_display + ['is_current', ]
|
[
"\n This method determines which fields go in the changelist\n "
] |
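A hedged sketch of a VersionedAdmin subclass using the list_display_show_* flags consumed by get_list_display; the Team model and the flag values are assumptions:

from django.contrib import admin

class TeamAdmin(VersionedAdmin):
    list_display = ('name',)
    list_display_show_identity = True
    list_display_show_start_date = True
    list_display_show_end_date = False

admin.site.register(Team, TeamAdmin)
# Resulting changelist columns:
# identity_shortener, name, version_start_date, is_current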
Please provide a description of the function:def get_list_filter(self, request):
list_filter = super(VersionedAdmin, self).get_list_filter(request)
return list(list_filter) + [('version_start_date', DateTimeFilter),
IsCurrentFilter]
|
[
"\n Adds versionable custom filtering ability to changelist\n "
] |
Please provide a description of the function:def restore(self, request, *args, **kwargs):
paths = request.path_info.split('/')
object_id_index = paths.index("restore") - 2
object_id = paths[object_id_index]
obj = super(VersionedAdmin, self).get_object(request, object_id)
obj.restore()
admin_wordIndex = object_id_index - 3
path = "/%s" % ("/".join(paths[admin_wordIndex:object_id_index]))
opts = self.model._meta
msg_dict = {
'name': force_text(opts.verbose_name),
'obj': format_html('<a href="{}">{}</a>',
urlquote(request.path), obj),
}
msg = format_html(_('The {name} "{obj}" was restored successfully.'),
**msg_dict)
self.message_user(request, msg, messages.SUCCESS)
return HttpResponseRedirect(path)
|
[
"\n View for restoring object from change view\n "
] |
Please provide a description of the function:def will_not_clone(self, request, *args, **kwargs):
paths = request.path_info.split('/')
index_of_object_id = paths.index("will_not_clone") - 1
object_id = paths[index_of_object_id]
self.change_view(request, object_id)
admin_wordInUrl = index_of_object_id - 3
# This gets the adminsite for the app, and the model name and joins
# together with /
path = '/' + '/'.join(paths[admin_wordInUrl:index_of_object_id])
return HttpResponseRedirect(path)
|
[
"\n Add save but not clone capability in the changeview\n "
] |
Please provide a description of the function:def exclude(self):
exclude = self.VERSIONED_EXCLUDE
if super(VersionedAdmin, self).exclude is not None:
# Force cast to list as super exclude could return a tuple
exclude = list(super(VersionedAdmin, self).exclude) + exclude
return exclude
|
[
"\n Custom descriptor for exclude since there is no get_exclude method to\n be overridden\n "
] |
Please provide a description of the function:def get_object(self, request, object_id, from_field=None):
# from_field breaks in 1.7.8
obj = super(VersionedAdmin, self).get_object(request,
object_id)
# Only clone if update view as get_object() is also called for change,
# delete, and history views
if request.method == 'POST' and \
obj and \
obj.is_latest and \
'will_not_clone' not in request.path and \
'delete' not in request.path and \
'restore' not in request.path:
obj = obj.clone()
return obj
|
[
"\n our implementation of get_object allows for cloning when updating an\n object, not cloning when the button 'save but not clone' is pushed\n and at no other time will clone be called\n "
] |
Please provide a description of the function:def history_view(self, request, object_id, extra_context=None):
"The 'history' admin view for this model."
from django.contrib.admin.models import LogEntry
# First check if the user can see this history.
model = self.model
obj = get_object_or_404(self.get_queryset(request),
pk=unquote(object_id))
if not self.has_change_permission(request, obj):
raise PermissionDenied
# Then get the history for this object.
opts = model._meta
app_label = opts.app_label
action_list = LogEntry.objects.filter(
object_id=unquote(str(obj.identity)),
# this is the change for our override;
content_type=get_content_type_for_model(model)
).select_related().order_by('action_time')
ctx = self.admin_site.each_context(request)
context = dict(ctx,
title=('Change history: %s') % force_text(obj),
action_list=action_list,
module_name=capfirst(
force_text(opts.verbose_name_plural)),
object=obj,
opts=opts,
preserved_filters=self.get_preserved_filters(request),
)
context.update(extra_context or {})
return TemplateResponse(request, self.object_history_template or [
"admin/%s/%s/object_history.html" % (app_label, opts.model_name),
"admin/%s/object_history.html" % app_label,
"admin/object_history.html"
], context)
|
[] |
Please provide a description of the function:def get_urls(self):
not_clone_url = [url(r'^(.+)/will_not_clone/$',
admin.site.admin_view(self.will_not_clone))]
restore_url = [
url(r'^(.+)/restore/$', admin.site.admin_view(self.restore))]
return not_clone_url + restore_url + super(VersionedAdmin,
self).get_urls()
|
[
"\n Appends the custom will_not_clone url to the admin site\n "
] |
Please provide a description of the function:def remove_uuid_id_like_indexes(app_name, database=None):
removed_indexes = 0
with database_connection(database).cursor() as cursor:
for model in versionable_models(app_name, include_auto_created=True):
indexes = select_uuid_like_indexes_on_table(model, cursor)
if indexes:
index_list = ','.join(['"%s"' % r[0] for r in indexes])
cursor.execute("DROP INDEX %s" % index_list)
removed_indexes += len(indexes)
return removed_indexes
|
[
"\n Remove all of varchar_pattern_ops indexes that django created for uuid\n columns.\n A search is never done with a filter of the style (uuid__like='1ae3c%'), so\n all such indexes can be removed from Versionable models.\n This will only try to remove indexes if they exist in the database, so it\n should be safe to run in a post_migrate signal handler. Running it several\n times should leave the database in the same state as running it once.\n :param str app_name: application name whose Versionable models will be\n acted on.\n :param str database: database alias to use. If None, use default\n connection.\n :return: number of indexes removed\n :rtype: int\n "
] |
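Since the docstring says this is idempotent and safe from a post_migrate handler, a minimal sketch (the 'myapp' label is an assumption):

from django.db.models.signals import post_migrate

def drop_like_indexes(sender, **kwargs):
    removed = remove_uuid_id_like_indexes('myapp')
    if removed:
        print('dropped %d uuid like-indexes' % removed)

post_migrate.connect(drop_like_indexes)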
Please provide a description of the function:def get_uuid_like_indexes_on_table(model):
with default_connection.cursor() as c:
indexes = select_uuid_like_indexes_on_table(model, c)
return indexes
|
[
"\n Gets a list of database index names for the given model for the\n uuid-containing fields that have had a like-index created on them.\n\n :param model: Django model\n :return: list of database rows; the first field of each row is an index\n name\n "
] |
Please provide a description of the function:def select_uuid_like_indexes_on_table(model, cursor):
# VersionedForeignKey fields as well as the id fields have these useless
# like indexes
field_names = ["'%s'" % f.column for f in model._meta.fields if
isinstance(f, VersionedForeignKey)]
field_names.append("'id'")
sql = """
    select i.relname as index_name
    from pg_class t, pg_class i, pg_index ix, pg_attribute a
    where t.oid = ix.indrelid and i.oid = ix.indexrelid
    and a.attrelid = t.oid and a.attnum = ANY(ix.indkey)
    and t.relkind = 'r' and t.relname = '{0}'
    and a.attname in ({1}) and i.relname like '%_like'
""".format(model._meta.db_table, ','.join(field_names))
cursor.execute(sql)
return cursor.fetchall()
|
[
"\n Gets a list of database index names for the given model for the\n uuid-containing fields that have had a like-index created on them.\n\n :param model: Django model\n :param cursor: database connection cursor\n :return: list of database rows; the first field of each row is an index\n name\n ",
"\n select i.relname as index_name\n from pg_class t,\n pg_class i,\n pg_index ix,\n pg_attribute a\n where t.oid = ix.indrelid\n and i.oid = ix.indexrelid\n and a.attrelid = t.oid\n and a.attnum = ANY(ix.indkey)\n and t.relkind = 'r'\n and t.relname = '{0}'\n and a.attname in ({1})\n and i.relname like '%_like'\n "
] |
Please provide a description of the function:def create_current_version_unique_indexes(app_name, database=None):
indexes_created = 0
connection = database_connection(database)
with connection.cursor() as cursor:
for model in versionable_models(app_name):
unique_field_groups = getattr(model, 'VERSION_UNIQUE', None)
if not unique_field_groups:
continue
table_name = model._meta.db_table
for group in unique_field_groups:
col_prefixes = []
columns = []
for field in group:
column = model._meta.get_field(field).column
col_prefixes.append(column[0:3])
columns.append(column)
index_name = '%s_%s_%s_v_uniq' % (
app_name, table_name, '_'.join(col_prefixes))
if not index_exists(cursor, index_name):
cursor.execute(
"CREATE UNIQUE INDEX %s ON %s(%s) "
"WHERE version_end_date IS NULL"
% (index_name, table_name, ','.join(columns)))
indexes_created += 1
return indexes_created
|
[
"\n Add unique indexes for models which have a VERSION_UNIQUE attribute.\n These must be defined as partially unique indexes, which django\n does not support.\n The unique indexes are defined so that no two *current* versions can have\n the same value.\n This will only try to create indexes if they do not exist in the database,\n so it should be safe to run in a post_migrate signal handler. Running it\n several times should leave the database in the same state as running it\n once.\n\n :param str app_name: application name whose Versionable models will be\n acted on.\n :param str database: database alias to use. If None, use default\n connection.\n :return: number of partial unique indexes created\n :rtype: int\n "
] |
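A sketch of the VERSION_UNIQUE attribute this function consumes; the Booking model and its fields are invented for illustration:

class Booking(Versionable):
    room = VersionedForeignKey('Room')
    date = models.DateField()
    # No two *current* versions may share the same (room, date) pair:
    VERSION_UNIQUE = [['room', 'date']]

# create_current_version_unique_indexes('myapp') would then issue roughly:
# CREATE UNIQUE INDEX myapp_myapp_booking_roo_dat_v_uniq
#     ON myapp_booking(room_id, date) WHERE version_end_date IS NULL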
Please provide a description of the function:def create_current_version_unique_identity_indexes(app_name, database=None):
indexes_created = 0
connection = database_connection(database)
with connection.cursor() as cursor:
for model in versionable_models(app_name):
if getattr(model._meta, 'managed', True):
table_name = model._meta.db_table
index_name = '%s_%s_identity_v_uniq' % (app_name, table_name)
if not index_exists(cursor, index_name):
cursor.execute(
"CREATE UNIQUE INDEX %s ON %s(%s) "
"WHERE version_end_date IS NULL"
% (index_name, table_name, 'identity'))
indexes_created += 1
return indexes_created
|
[
"\n Add partial unique indexes for the the identity column of versionable\n models.\n\n This enforces that no two *current* versions can have the same identity.\n\n This will only try to create indexes if they do not exist in the database,\n so it should be safe to run in a post_migrate signal handler. Running it\n several times should leave the database in the same state as running it\n once.\n\n :param str app_name: application name whose Versionable models will be\n acted on.\n :param str database: database alias to use. If None, use default\n connection.\n :return: number of partial unique indexes created\n :rtype: int\n "
] |
Please provide a description of the function:def get_queryset(self):
qs = VersionedQuerySet(self.model, using=self._db)
if hasattr(self, 'instance') and hasattr(self.instance, '_querytime'):
qs.querytime = self.instance._querytime
return qs
|
[
"\n Returns a VersionedQuerySet capable of handling version time\n restrictions.\n\n :return: VersionedQuerySet\n "
] |
Please provide a description of the function:def next_version(self, object, relations_as_of='end'):
if object.version_end_date is None:
next = object
else:
next = self.filter(
Q(identity=object.identity),
Q(version_start_date__gte=object.version_end_date)
).order_by('version_start_date').first()
if not next:
raise ObjectDoesNotExist(
"next_version couldn't find a next version of object " +
str(object.identity))
return self.adjust_version_as_of(next, relations_as_of)
|
[
"\n Return the next version of the given object.\n\n In case there is no next object existing, meaning the given\n object is the current version, the function returns this version.\n\n Note that if object's version_end_date is None, this does not check\n the database to see if there is a newer version (perhaps created by\n some other code), it simply returns the passed object.\n\n ``relations_as_of`` is used to fix the point in time for the version;\n this affects which related objects are returned when querying for\n object relations. See ``VersionManager.version_as_of`` for details\n on valid ``relations_as_of`` values.\n\n :param Versionable object: object whose next version will be returned.\n :param mixed relations_as_of: determines point in time used to access\n relations. 'start'|'end'|datetime|None\n :return: Versionable\n "
] |
Please provide a description of the function:def previous_version(self, object, relations_as_of='end'):
if object.version_birth_date == object.version_start_date:
previous = object
else:
previous = self.filter(
Q(identity=object.identity),
Q(version_end_date__lte=object.version_start_date)
).order_by('-version_end_date').first()
if not previous:
raise ObjectDoesNotExist(
"previous_version couldn't find a previous version of "
"object " + str(object.identity))
return self.adjust_version_as_of(previous, relations_as_of)
|
[
"\n Return the previous version of the given object.\n\n In case there is no previous object existing, meaning the given object\n is the first version of the object, then the function returns this\n version.\n\n ``relations_as_of`` is used to fix the point in time for the version;\n this affects which related objects are returned when querying for\n object relations. See ``VersionManager.version_as_of`` for details on\n valid ``relations_as_of`` values.\n\n :param Versionable object: object whose previous version will be\n returned.\n :param mixed relations_as_of: determines point in time used to access\n relations. 'start'|'end'|datetime|None\n :return: Versionable\n "
] |
Please provide a description of the function:def current_version(self, object, relations_as_of=None, check_db=False):
if object.version_end_date is None and not check_db:
current = object
else:
current = self.current.filter(identity=object.identity).first()
return self.adjust_version_as_of(current, relations_as_of)
|
[
"\n Return the current version of the given object.\n\n The current version is the one having its version_end_date set to NULL.\n If there is not such a version then it means the object has been\n 'deleted' and so there is no current version available. In this case\n the function returns None.\n\n Note that if check_db is False and object's version_end_date is None,\n this does not check the database to see if there is a newer version\n (perhaps created by some other code), it simply returns the passed\n object.\n\n ``relations_as_of`` is used to fix the point in time for the version;\n this affects which related objects are returned when querying for\n object relations. See ``VersionManager.version_as_of`` for details on\n valid ``relations_as_of`` values.\n\n :param Versionable object: object whose current version will be\n returned.\n :param mixed relations_as_of: determines point in time used to access\n relations. 'start'|'end'|datetime|None\n :param bool check_db: Whether or not to look in the database for a\n more recent version\n :return: Versionable\n "
] |
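A hedged sketch of walking a version chain with next_version, previous_version, and current_version; the Team model is an assumption:

team = Team.objects.create(name='A-Team')
team_v2 = team.clone()          # 'team' becomes the historical version
team_v2.name = 'B-Team'
team_v2.save()

Team.objects.previous_version(team_v2)             # -> the 'A-Team' record
Team.objects.next_version(team)                    # -> the 'B-Team' record
Team.objects.current_version(team, check_db=True)  # -> latest from the DB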
Please provide a description of the function:def adjust_version_as_of(version, relations_as_of):
if not version:
return version
if relations_as_of == 'end':
if version.is_current:
# Ensure that version._querytime is active, in case it wasn't
# before.
version.as_of = None
else:
version.as_of = version.version_end_date - datetime.timedelta(
microseconds=1)
elif relations_as_of == 'start':
version.as_of = version.version_start_date
elif isinstance(relations_as_of, datetime.datetime):
as_of = relations_as_of.astimezone(utc)
if not as_of >= version.version_start_date:
raise ValueError(
"Provided as_of '{}' is earlier than version's start "
"time '{}'".format(
as_of.isoformat(),
version.version_start_date.isoformat()
)
)
if version.version_end_date is not None \
and as_of >= version.version_end_date:
raise ValueError(
"Provided as_of '{}' is later than version's start "
"time '{}'".format(
as_of.isoformat(),
version.version_end_date.isoformat()
)
)
version.as_of = as_of
elif relations_as_of is None:
version._querytime = QueryTime(time=None, active=False)
else:
raise TypeError(
"as_of parameter must be 'start', 'end', None, or datetime "
"object")
return version
|
[
"\n Adjusts the passed version's as_of time to an appropriate value, and\n returns it.\n\n ``relations_as_of`` is used to fix the point in time for the version;\n this affects which related objects are returned when querying for\n object relations.\n Valid ``relations_as_of`` values and how this affects the returned\n version's as_of attribute:\n - 'start': version start date\n - 'end': version end date - 1 microsecond (no effect if version is\n current version)\n - datetime object: given datetime (raises ValueError if given datetime\n not valid for version)\n - None: unset (related object queries will not be restricted to a\n point in time)\n\n :param Versionable object: object whose as_of will be adjusted as\n requested.\n :param mixed relations_as_of: valid values are the strings 'start' or\n 'end', or a datetime object.\n :return: Versionable\n "
] |
Please provide a description of the function:def _create_at(self, timestamp=None, id=None, forced_identity=None,
**kwargs):
id = Versionable.uuid(id)
if forced_identity:
ident = Versionable.uuid(forced_identity)
else:
ident = id
if timestamp is None:
timestamp = get_utc_now()
kwargs['id'] = id
kwargs['identity'] = ident
kwargs['version_start_date'] = timestamp
kwargs['version_birth_date'] = timestamp
return super(VersionManager, self).create(**kwargs)
|
[
"\n WARNING: Only for internal use and testing.\n\n Create a Versionable having a version_start_date and\n version_birth_date set to some pre-defined timestamp\n\n :param timestamp: point in time at which the instance has to be created\n :param id: version 4 UUID unicode object. Usually this is not\n specified, it will be automatically created.\n :param forced_identity: version 4 UUID unicode object. For internal\n use only.\n :param kwargs: arguments needed for initializing the instance\n :return: an instance of the class\n "
] |
Please provide a description of the function:def as_sql(self, qn, connection):
# self.children is an array of VersionedExtraWhere-objects
from versions.fields import VersionedExtraWhere
for child in self.children:
if isinstance(child, VersionedExtraWhere) and not child.params:
_query = qn.query
query_time = _query.querytime.time
apply_query_time = _query.querytime.active
alias_map = _query.alias_map
self._set_child_joined_alias(child, alias_map)
if apply_query_time:
# Add query parameters that have not been added till now
child.set_as_of(query_time)
else:
# Remove the restriction if it's not required
child.sqls = []
return super(VersionedWhereNode, self).as_sql(qn, connection)
|
[
"\n This method identifies joined table aliases in order for\n VersionedExtraWhere.as_sql() to be able to add time restrictions for\n those tables based on the VersionedQuery's querytime value.\n\n :param qn: In Django 1.7 & 1.8 this is a compiler\n :param connection: A DB connection\n :return: A tuple consisting of (sql_string, result_params)\n "
] |
Please provide a description of the function:def _set_child_joined_alias(child, alias_map):
for table in alias_map:
join = alias_map[table]
if not isinstance(join, Join):
continue
lhs = join.parent_alias
if (lhs == child.alias and table == child.related_alias) \
or (lhs == child.related_alias and table == child.alias):
child.set_joined_alias(table)
break
|
[
"\n Set the joined alias on the child, for Django >= 1.8.0\n :param child:\n :param alias_map:\n "
] |
Please provide a description of the function:def get_compiler(self, *args, **kwargs):
if self.querytime.active and \
(not hasattr(self, '_querytime_filter_added') or
not self._querytime_filter_added):
time = self.querytime.time
if time is None:
self.add_q(Q(version_end_date__isnull=True))
else:
self.add_q(
(Q(version_end_date__gt=time) |
Q(version_end_date__isnull=True)) &
Q(version_start_date__lte=time)
)
# Ensure applying these filters happens only a single time (even
# if it doesn't falsify the query, it's just not very comfortable
# to read)
self._querytime_filter_added = True
return super(VersionedQuery, self).get_compiler(*args, **kwargs)
|
[
"\n Add the query time restriction limit at the last moment. Applying it\n earlier (e.g. by adding a filter to the queryset) does not allow the\n caching of related object to work (they are attached to a queryset;\n filter() returns a new queryset).\n "
] |
Please provide a description of the function:def build_filter(self, filter_expr, **kwargs):
lookup, value = filter_expr
if self.querytime.active \
and isinstance(value, Versionable) and not value.is_latest:
new_lookup = \
lookup + LOOKUP_SEP + Versionable.OBJECT_IDENTIFIER_FIELD
filter_expr = (new_lookup, value.identity)
return super(VersionedQuery, self).build_filter(filter_expr, **kwargs)
|
[
"\n When a query is filtered with an expression like\n .filter(team=some_team_object), where team is a VersionedForeignKey\n field, and some_team_object is a Versionable object, adapt the filter\n value to be (team__identity=some_team_object.identity).\n\n When the query is built, this will enforce that the tables are joined\n and that the identity column and the as_of restriction is used for\n matching.\n\n For example, the generated SQL will be like:\n\n SELECT ... FROM foo INNER JOIN team ON (\n foo.team_id == team.identity\n AND foo.version_start_date <= [as_of]\n AND (foo.version_end_date > [as_of]\n OR foo.version_end_date IS NULL)) ...\n\n This is not necessary, and won't be applied, if any of these are true:\n - no as_of is in effect\n - the current objects are being queried\n (e.g. foo.objects.current.filter(...))\n - a terminal object is being used as the lookup value\n (e.g. .filter(team=the_deleted_team_version)\n - the lookup value is not a Versionable\n (e.g. .filter(foo='bar') or .filter(team=non_versionable_team)\n\n Note that this has the effect that\n Foo.objects.as_of(t1).filter(team=team_object_at_t3) will return the\n Foo objects at t1, and that accessing their team field (e.g. foo.team)\n will return the team object that was associated with them at t1,\n which may be a different object than team_object_at_t3.\n\n The goal is to make expressions like\n Foo.objects.as_of(tx).filter(team=some_team_object) work as closely\n as possible to standard, non-versioned Django querysets like\n Foo.objects.filter(team=some_team_object).\n\n :param filter_expr:\n :param kwargs:\n :return: tuple\n "
] |
Please provide a description of the function:def querytime(self, value):
self._querytime = value
self.query.querytime = value
|
[
"\n Sets self._querytime as well as self.query.querytime.\n :param value: None or datetime\n :return:\n "
] |
Please provide a description of the function:def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self.iterator())
# TODO: Do we have to test for ValuesListIterable, ValuesIterable,
# and FlatValuesListIterable here?
if self._iterable_class == ModelIterable:
for x in self._result_cache:
self._set_item_querytime(x)
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
|
[
"\n Completely overrides the QuerySet._fetch_all method by adding the\n timestamp to all objects\n\n :return: See django.db.models.query.QuerySet._fetch_all for return\n values\n "
] |
Please provide a description of the function:def _clone(self, *args, **kwargs):
clone = super(VersionedQuerySet, self)._clone(**kwargs)
clone.querytime = self.querytime
return clone
|
[
"\n Overrides the QuerySet._clone method by adding the cloning of the\n VersionedQuerySet's query_time parameter\n\n :param kwargs: Same as the original QuerySet._clone params\n :return: Just as QuerySet._clone, this method returns a clone of the\n original object\n "
] |
Please provide a description of the function:def _set_item_querytime(self, item, type_check=True):
if isinstance(item, Versionable):
item._querytime = self.querytime
elif isinstance(item, VersionedQuerySet):
item.querytime = self.querytime
else:
if type_check:
raise TypeError(
"This item is not a Versionable, it's a " + str(
type(item)))
return item
|
[
"\n Sets the time for which the query was made on the resulting item\n\n :param item: an item of type Versionable\n :param type_check: Check the item to be a Versionable\n :return: Returns the item itself with the time set\n "
] |
Please provide a description of the function:def as_of(self, qtime=None):
clone = self._clone()
clone.querytime = QueryTime(time=qtime, active=True)
return clone
|
[
"\n Sets the time for which we want to retrieve an object.\n\n :param qtime: The UTC date and time; if None then use the current\n state (where version_end_date = NULL)\n :return: A VersionedQuerySet\n "
] |
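Sketches of the querytime modes enabled by as_of; t1 is assumed to be a timezone-aware UTC datetime captured earlier:

Foo.objects.as_of(t1)   # versions whose validity interval contains t1
Foo.objects.as_of()     # qtime=None: current objects (end date NULL)
Foo.objects.as_of(t1).filter(team=some_team)  # joins on identity, see build_filter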
Please provide a description of the function:def delete(self):
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with delete."
# Ensure that only current objects are selected.
del_query = self.filter(version_end_date__isnull=True)
# The delete is actually 2 queries - one to find related objects,
# and one to delete. Make sure that the discovery of related
# objects is performed on the same database as the deletion.
del_query._for_write = True
# Disable non-supported fields.
del_query.query.select_for_update = False
del_query.query.select_related = False
del_query.query.clear_ordering(force_empty=True)
collector_class = get_versioned_delete_collector_class()
collector = collector_class(using=del_query.db)
collector.collect(del_query)
collector.delete(get_utc_now())
# Clear the result cache, in case this QuerySet gets reused.
self._result_cache = None
|
[
"\n Deletes the records in the QuerySet.\n "
] |
Please provide a description of the function:def _delete_at(self, timestamp, using=None):
if self.version_end_date is None:
self.version_end_date = timestamp
self.save(force_update=True, using=using)
else:
raise DeletionOfNonCurrentVersionError(
'Cannot delete anything else but the current version')
|
[
"\n WARNING: This method is only for internal use, it should not be used\n from outside.\n\n It is used only in the case when you want to make sure a group of\n related objects are deleted at the exact same time.\n\n It is certainly not meant to be used for deleting an object and giving\n it a random deletion date of your liking.\n "
] |
Please provide a description of the function:def uuid(uuid_value=None):
if uuid_value:
if not validate_uuid(uuid_value):
raise ValueError(
"uuid_value must be a valid UUID version 4 object")
else:
uuid_value = uuid.uuid4()
if versions_settings.VERSIONS_USE_UUIDFIELD:
return uuid_value
else:
return six.u(str(uuid_value))
|
[
"\n Returns a uuid value that is valid to use for id and identity fields.\n\n :return: unicode uuid object if using UUIDFields, uuid unicode string\n otherwise.\n "
] |
Please provide a description of the function:def clone(self, forced_version_date=None, in_bulk=False):
if not self.pk:
raise ValueError('Instance must be saved before it can be cloned')
if self.version_end_date:
raise ValueError(
'This is a historical item and can not be cloned.')
if forced_version_date:
if not self.version_start_date <= forced_version_date <= \
get_utc_now():
raise ValueError(
'The clone date must be between the version start date '
'and now.')
else:
forced_version_date = get_utc_now()
if self.get_deferred_fields():
# It would be necessary to fetch the record from the database
# again for this to succeed.
# Alternatively, perhaps it would be possible to create a copy of
# the object after fetching the missing fields.
# Doing so may be unexpected by the calling code, so raise an
# exception: the calling code should be adapted if necessary.
raise ValueError(
'Can not clone a model instance that has deferred fields')
earlier_version = self
later_version = copy.copy(earlier_version)
later_version.version_end_date = None
later_version.version_start_date = forced_version_date
# set earlier_version's ID to a new UUID so the clone (later_version)
# can get the old one -- this allows 'head' to always have the original
# id allowing us to get at all historic foreign key relationships
earlier_version.id = self.uuid()
earlier_version.version_end_date = forced_version_date
if not in_bulk:
# This condition might save us a lot of database queries if we are
# being called from a loop like in .clone_relations
earlier_version.save()
later_version.save()
else:
earlier_version._not_created = True
# re-create ManyToMany relations
for field_name in self.get_all_m2m_field_names():
earlier_version.clone_relations(later_version, field_name,
forced_version_date)
return later_version
|
[
"\n Clones a Versionable and returns a fresh copy of the original object.\n Original source: ClonableMixin snippet\n (http://djangosnippets.org/snippets/1271), with the pk/id change\n suggested in the comments\n\n :param forced_version_date: a timestamp including tzinfo; this value\n is usually set only internally!\n :param in_bulk: whether not to write this objects to the database\n already, if not necessary; this value is usually set only\n internally for performance optimization\n :return: returns a fresh clone of the original object\n (with adjusted relations)\n "
] |
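A minimal clone sketch illustrating the id swap described in the comments above: the returned (new) version keeps the original pk, while the passed object becomes the historical record. The model name is an assumption:

old_pk = team.pk
new_version = team.clone()
assert new_version.pk == old_pk           # 'head' keeps the original id
assert team.version_end_date is not None  # passed object is now historical
new_version.name = 'Renamed'
new_version.save()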
Please provide a description of the function:def at(self, timestamp):
# Ensure, it's not a historic item
if not self.is_current:
raise SuspiciousOperation(
"Cannot relocate this Versionable instance in time, since it "
"is a historical item")
# Ensure it's not a versioned item (that would lead to some ugly
# situations...)
if not self.version_birth_date == self.version_start_date:
raise SuspiciousOperation(
"Cannot relocate this Versionable instance in time, since it "
"is a versioned instance")
# Ensure the argument is really a timestamp
if not isinstance(timestamp, datetime.datetime):
raise ValueError("This is not a datetime.datetime timestamp")
self.version_birth_date = self.version_start_date = timestamp
return self
|
[
"\n Force the create date of an object to be at a certain time; This\n method can be invoked only on a freshly created Versionable object.\n It must not have been cloned yet. Raises a SuspiciousOperation\n exception, otherwise.\n :param timestamp: a datetime.datetime instance\n "
] |
Please provide a description of the function:def restore(self, **kwargs):
if not self.pk:
raise ValueError(
'Instance must be saved and terminated before it can be '
'restored.')
if self.is_current:
raise ValueError(
'This is the current version, no need to restore it.')
if self.get_deferred_fields():
# It would be necessary to fetch the record from the database
# again for this to succeed.
# Alternatively, perhaps it would be possible to create a copy
# of the object after fetching the missing fields.
# Doing so may be unexpected by the calling code, so raise an
# exception: the calling code should be adapted if necessary.
raise ValueError(
'Can not restore a model instance that has deferred fields')
cls = self.__class__
now = get_utc_now()
restored = copy.copy(self)
restored.version_end_date = None
restored.version_start_date = now
fields = [f for f in cls._meta.local_fields if
f.name not in Versionable.VERSIONABLE_FIELDS]
for field in fields:
if field.attname in kwargs:
# Fake an object in order to avoid a DB roundtrip
# This was made necessary, since assigning to the field's
# attname did not work anymore with Django 2.0
obj = field.remote_field.model(id=kwargs[field.attname])
setattr(restored, field.name, obj)
elif field.name in kwargs:
setattr(restored, field.name, kwargs[field.name])
elif isinstance(field, ForeignKey):
# Set all non-provided ForeignKeys to None. If required,
# raise an error.
try:
setattr(restored, field.name, None)
# Check for non null foreign key removed since Django 1.10
# https://docs.djangoproject.com/en/1.10/releases/1.10/
# #removed-null-assignment-check-for-non-null-foreign-
# key-fields
if not field.null:
raise ValueError
except ValueError:
raise ForeignKeyRequiresValueError
self.id = self.uuid()
with transaction.atomic():
# If this is not the latest version, terminate the latest version
latest = cls.objects.current_version(self, check_db=True)
if latest and latest != self:
latest.delete()
restored.version_start_date = latest.version_end_date
self.save()
restored.save()
# Update ManyToMany relations to point to the old version's id
# instead of the restored version's id.
for field_name in self.get_all_m2m_field_names():
manager = getattr(restored,
field_name)
# returns a VersionedRelatedManager instance
manager.through.objects.filter(
**{manager.source_field.attname: restored.id}).update(
**{manager.source_field_name: self})
return restored
|
[
"\n Restores this version as a new version, and returns this new version.\n\n If a current version already exists, it will be terminated before\n restoring this version.\n\n Relations (foreign key, reverse foreign key, many-to-many) are not\n restored with the old version. If provided in kwargs,\n (Versioned)ForeignKey fields will be set to the provided values.\n If passing an id for a (Versioned)ForeignKey, use the field.attname.\n For example:\n restore(team_id=myteam.pk)\n If passing an object, simply use the field name, e.g.:\n restore(team=myteam)\n\n If a (Versioned)ForeignKey is not nullable and no value is provided\n for it in kwargs, a ForeignKeyRequiresValueError will be raised.\n\n :param kwargs: arguments used to initialize the class instance\n :return: Versionable\n "
] |
Please provide a description of the function:def detach(self):
self.id = self.identity = self.uuid()
self.version_start_date = self.version_birth_date = get_utc_now()
self.version_end_date = None
return self
|
[
"\n Detaches the instance from its history.\n\n Similar to creating a new object with the same field values. The id and\n identity fields are set to a new value. The returned object has not\n been saved, call save() afterwards when you are ready to persist the\n object.\n\n ManyToMany and reverse ForeignKey relations are lost for the detached\n object.\n\n :return: Versionable\n "
] |
Please provide a description of the function:def matches_querytime(instance, querytime):
if not querytime.active:
return True
if not querytime.time:
return instance.version_end_date is None
return (instance.version_start_date <= querytime.time and
(instance.version_end_date is None or
instance.version_end_date > querytime.time))
|
[
"\n Checks whether the given instance satisfies the given QueryTime object.\n\n :param instance: an instance of Versionable\n :param querytime: QueryTime value to check against\n "
] |
Please provide a description of the function:def contribute_to_related_class(self, cls, related):
# Internal FK's - i.e., those with a related name ending with '+' -
# and swapped models don't get a related descriptor.
super(VersionedForeignKey, self).contribute_to_related_class(cls,
related)
accessor_name = related.get_accessor_name()
if hasattr(cls, accessor_name):
setattr(cls, accessor_name,
VersionedReverseManyToOneDescriptor(related))
|
[
"\n Override ForeignKey's methods, and replace the descriptor, if set by\n the parent's methods\n "
] |
Please provide a description of the function:def get_extra_restriction(self, where_class, alias, remote_alias):
historic_sql = '''{alias}.version_start_date <= %s
AND ({alias}.version_end_date > %s
OR {alias}.version_end_date is NULL )'''
current_sql = '''{alias}.version_end_date is NULL'''
# How 'bout creating an ExtraWhere here, without params
return where_class([VersionedExtraWhere(historic_sql=historic_sql,
current_sql=current_sql,
alias=alias,
remote_alias=remote_alias)])
|
[
"\n Overrides ForeignObject's get_extra_restriction function that returns\n an SQL statement which is appended to a JOIN's conditional filtering\n part\n\n :return: SQL conditional statement\n :rtype: WhereNode\n "
] |
Please provide a description of the function:def get_joining_columns(self, reverse_join=False):
source = self.reverse_related_fields if reverse_join \
else self.related_fields
joining_columns = tuple()
for lhs_field, rhs_field in source:
lhs_col_name = lhs_field.column
rhs_col_name = rhs_field.column
# Test whether
# - self is the current ForeignKey relationship
# - self was not auto_created (e.g. is not part of a M2M
# relationship)
if self is lhs_field and not self.auto_created:
if rhs_col_name == Versionable.VERSION_IDENTIFIER_FIELD:
rhs_col_name = Versionable.OBJECT_IDENTIFIER_FIELD
elif self is rhs_field and not self.auto_created:
if lhs_col_name == Versionable.VERSION_IDENTIFIER_FIELD:
lhs_col_name = Versionable.OBJECT_IDENTIFIER_FIELD
joining_columns = joining_columns + ((lhs_col_name, rhs_col_name),)
return joining_columns
|
[
"\n Get and return joining columns defined by this foreign key relationship\n\n :return: A tuple containing the column names of the tables to be\n joined (<local_col_name>, <remote_col_name>)\n :rtype: tuple\n "
] |
Please provide a description of the function:def contribute_to_class(self, cls, name, **kwargs):
# TODO: Apply 3 edge cases when not to create an intermediary model
# specified in django.db.models.fields.related:1566
# self.rel.through needs to be set prior to calling super, since
# super(...).contribute_to_class refers to it.
# Classes pointed to by a string do not need to be resolved here,
# since Django does that at a later point in time - which is nice... ;)
#
# Superclasses take care of:
# - creating the through class if unset
# - resolving the through class if it's a string
# - resolving string references within the through class
if not self.remote_field.through and \
not cls._meta.abstract and \
not cls._meta.swapped:
# We need to anticipate some stuff, that's done only later in
# class contribution
self.set_attributes_from_name(name)
self.model = cls
self.remote_field.through = VersionedManyToManyField.\
create_versioned_many_to_many_intermediary_model(self, cls,
name)
super(VersionedManyToManyField, self).contribute_to_class(cls, name)
# Overwrite the descriptor
if hasattr(cls, self.name):
setattr(cls, self.name,
VersionedManyToManyDescriptor(self.remote_field))
|
[
"\n Called at class type creation. So, this method is called, when\n metaclasses get created\n "
] |
Please provide a description of the function:def contribute_to_related_class(self, cls, related):
super(VersionedManyToManyField, self). \
contribute_to_related_class(cls, related)
accessor_name = related.get_accessor_name()
if accessor_name and hasattr(cls, accessor_name):
descriptor = VersionedManyToManyDescriptor(related, accessor_name)
setattr(cls, accessor_name, descriptor)
if hasattr(cls._meta, 'many_to_many_related') and isinstance(
cls._meta.many_to_many_related, list):
cls._meta.many_to_many_related.append(descriptor)
else:
cls._meta.many_to_many_related = [descriptor]
|
[
"\n Called at class type creation. So, this method is called, when\n metaclasses get created\n "
] |
Please provide a description of the function:def _set_child_joined_alias_using_join_map(child, join_map, alias_map):
for lhs, table, join_cols in join_map:
if lhs is None:
continue
if lhs == child.alias:
relevant_alias = child.related_alias
elif lhs == child.related_alias:
relevant_alias = child.alias
else:
continue
join_info = alias_map[relevant_alias]
if join_info.join_type is None:
continue
if join_info.lhs_alias in [child.alias, child.related_alias]:
child.set_joined_alias(relevant_alias)
break
|
[
"\n Set the joined alias on the child, for Django <= 1.7.x.\n :param child:\n :param join_map:\n :param alias_map:\n "
] |
Please provide a description of the function:def get_versioned_delete_collector_class():
key = 'VERSIONED_DELETE_COLLECTOR'
try:
cls = _cache[key]
except KeyError:
collector_class_string = getattr(settings, key)
cls = import_from_string(collector_class_string, key)
_cache[key] = cls
return cls
|
[
"\n Gets the class to use for deletion collection.\n\n :return: class\n "
] |
Please provide a description of the function:def related_objects(self, related, objs):
from versions.models import Versionable
related_model = related.related_model
if issubclass(related_model, Versionable):
qs = related_model.objects.current
else:
qs = related_model._base_manager.all()
return qs.using(self.using).filter(
**{"%s__in" % related.field.name: objs}
)
|
[
"\n Gets a QuerySet of current objects related to ``objs`` via the\n relation ``related``.\n "
] |
Please provide a description of the function:def versionable_delete(self, instance, timestamp):
instance._delete_at(timestamp, using=self.using)
|
[
"\n Soft-deletes the instance, setting it's version_end_date to timestamp.\n\n Override this method to implement custom behaviour.\n\n :param Versionable instance:\n :param datetime timestamp:\n "
] |
Please provide a description of the function:def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
# CleanerVersion change 1: force the querytime to be the same as the
# prefetched-for instance.
# This is necessary to have reliable results and avoid extra queries
# for cache misses when accessing the child objects from their
# parents (e.g. choice.poll).
instance_querytime = instances[0]._querytime
if instance_querytime.active:
if queryset.querytime.active and \
queryset.querytime.time != instance_querytime.time:
raise ValueError(
"A Prefetch queryset that specifies an as_of time must "
"match the as_of of the base queryset.")
else:
queryset.querytime = instance_querytime
# CleanerVersion change 2: make rel_obj_attr return a tuple with
# the object's identity.
# rel_obj_attr = self.field.get_foreign_related_value
def versioned_fk_rel_obj_attr(versioned_rel_obj):
return versioned_rel_obj.identity,
rel_obj_attr = versioned_fk_rel_obj_attr
instance_attr = self.field.get_local_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
# CleanerVersion change 3: fake the related field so that it provides
# a name of 'identity'.
# related_field = self.field.foreign_related_fields[0]
related_field = namedtuple('VersionedRelatedFieldTuple', 'name')(
'identity')
# FIXME: This will need to be revisited when we introduce support for
# composite fields. In the meantime we take this practical approach to
# solve a regression on 1.6 when the reverse manager in hidden
# (related_name ends with a '+'). Refs #21410.
# The check for len(...) == 1 is a special case that allows the query
# to be join-less and smaller. Refs #21760.
if self.field.remote_field.is_hidden() or len(
self.field.foreign_related_fields) == 1:
query = {'%s__in' % related_field.name: set(
instance_attr(inst)[0] for inst in instances)}
else:
query = {'%s__in' % self.field.related_query_name(): instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
if not self.field.remote_field.multiple:
rel_obj_cache_name = self.field.remote_field.get_cache_name()
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_obj_cache_name, instance)
if VERSION[:1] < (2,):
return (queryset, rel_obj_attr, instance_attr, True,
self.field.get_cache_name())
else:
return (queryset, rel_obj_attr, instance_attr, True,
self.field.get_cache_name(), False)
|
[
"\n Overrides the parent method to:\n - force queryset to use the querytime of the parent objects\n - ensure that the join is done on identity, not id\n - make the cache key identity, not id.\n "
] |
Please provide a description of the function:def get_current_m2m_diff(self, instance, new_objects):
new_ids = self.pks_from_objects(new_objects)
relation_manager = self.__get__(instance)
filter = Q(**{relation_manager.source_field.attname: instance.pk})
qs = self.through.objects.current.filter(filter)
try:
# Django 1.7
target_name = relation_manager.target_field.attname
except AttributeError:
# Django 1.6
target_name = relation_manager.through._meta.get_field_by_name(
relation_manager.target_field_name)[0].attname
current_ids = set(qs.values_list(target_name, flat=True))
being_removed = current_ids - new_ids
being_added = new_ids - current_ids
return list(being_removed), list(being_added)
|
[
"\n :param instance: Versionable object\n :param new_objects: objects which are about to be associated with\n instance\n :return: (being_removed id list, being_added id list)\n :rtype : tuple\n "
] |
Please provide a description of the function:def pks_from_objects(self, objects):
return {o.pk if isinstance(o, Model) else o for o in objects}
|
[
"\n Extract all the primary key strings from the given objects.\n Objects may be Versionables, or bare primary keys.\n\n :rtype : set\n "
] |
Please provide a description of the function:def fit(self, vecs, iter=20, seed=123):
assert vecs.dtype == np.float32
assert vecs.ndim == 2
N, D = vecs.shape
assert self.Ks < N, "the number of training vectors should be more than Ks"
assert D % self.M == 0, "input dimension must be divisible by M"
self.Ds = int(D / self.M)
np.random.seed(seed)
if self.verbose:
print("iter: {}, seed: {}".format(iter, seed))
# [m][ks][ds]: m-th subspace, ks-the codeword, ds-th dim
self.codewords = np.zeros((self.M, self.Ks, self.Ds), dtype=np.float32)
for m in range(self.M):
if self.verbose:
print("Training the subspace: {} / {}".format(m, self.M))
vecs_sub = vecs[:, m * self.Ds : (m+1) * self.Ds]
self.codewords[m], _ = kmeans2(vecs_sub, self.Ks, iter=iter, minit='points')
return self
|
[
"Given training vectors, run k-means for each sub-space and create\n codewords for each sub-space.\n\n This function should be run once first of all.\n\n Args:\n vecs (np.ndarray): Training vectors with shape=(N, D) and dtype=np.float32.\n iter (int): The number of iteration for k-means\n seed (int): The seed for random process\n\n Returns:\n object: self\n\n "
] |
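An end-to-end sketch of the fit/encode/decode cycle with the API above; this matches the nanopq-style PQ class, and the constructor signature is assumed from the attributes used in fit:

import numpy as np

pq = PQ(M=8, Ks=256)                  # 8 sub-spaces, 256 codewords each
X = np.random.random((1000, 128)).astype(np.float32)
pq.fit(X, iter=20, seed=123)          # k-means per sub-space
codes = pq.encode(X)                  # shape (1000, 8), small integer codes
X_approx = pq.decode(codes)           # lossy reconstruction
print(((X - X_approx) ** 2).mean())   # quantization error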
Please provide a description of the function:def encode(self, vecs):
assert vecs.dtype == np.float32
assert vecs.ndim == 2
N, D = vecs.shape
assert D == self.Ds * self.M, "input dimension must be Ds * M"
# codes[n][m] : code of n-th vec, m-th subspace
codes = np.empty((N, self.M), dtype=self.code_dtype)
for m in range(self.M):
if self.verbose:
print("Encoding the subspace: {} / {}".format(m, self.M))
vecs_sub = vecs[:, m * self.Ds : (m+1) * self.Ds]
codes[:, m], _ = vq(vecs_sub, self.codewords[m])
return codes
|
[
"Encode input vectors into PQ-codes.\n\n Args:\n vecs (np.ndarray): Input vectors with shape=(N, D) and dtype=np.float32.\n\n Returns:\n np.ndarray: PQ codes with shape=(N, M) and dtype=self.code_dtype\n\n "
] |
Please provide a description of the function:def decode(self, codes):
assert codes.ndim == 2
N, M = codes.shape
assert M == self.M
assert codes.dtype == self.code_dtype
vecs = np.empty((N, self.Ds * self.M), dtype=np.float32)
for m in range(self.M):
vecs[:, m * self.Ds : (m+1) * self.Ds] = self.codewords[m][codes[:, m], :]
return vecs
|
[
"Given PQ-codes, reconstruct original D-dimensional vectors\n approximately by fetching the codewords.\n\n Args:\n codes (np.ndarray): PQ-cdoes with shape=(N, M) and dtype=self.code_dtype.\n Each row is a PQ-code\n\n Returns:\n np.ndarray: Reconstructed vectors with shape=(N, D) and dtype=np.float32\n\n "
] |
Please provide a description of the function:def dtable(self, query):
assert query.dtype == np.float32
assert query.ndim == 1, "input must be a single vector"
D, = query.shape
assert D == self.Ds * self.M, "input dimension must be Ds * M"
# dtable[m] : distance between m-th subvec and m-th codewords (m-th subspace)
# dtable[m][ks] : distance between m-th subvec and ks-th codeword of m-th codewords
dtable = np.empty((self.M, self.Ks), dtype=np.float32)
for m in range(self.M):
query_sub = query[m * self.Ds : (m+1) * self.Ds]
dtable[m, :] = np.linalg.norm(self.codewords[m] - query_sub, axis=1) ** 2
return DistanceTable(dtable)
|
[
"Compute a distance table for a query vector.\n The distances are computed by comparing each sub-vector of the query\n to the codewords for each sub-subspace.\n `dtable[m][ks]` contains the squared Euclidean distance between\n the `m`-th sub-vector of the query and the `ks`-th codeword\n for the `m`-th sub-space (`self.codewords[m][ks]`).\n\n Args:\n query (np.ndarray): Input vector with shape=(D, ) and dtype=np.float32\n\n Returns:\n nanopq.DistanceTable:\n Distance table. which contains\n dtable with shape=(M, Ks) and dtype=np.float32\n\n "
] |
Please provide a description of the function:def adist(self, codes):
assert codes.ndim == 2
N, M = codes.shape
assert M == self.dtable.shape[0]
# Fetch distance values using the codes (fancy indexing over dtable).
dists = np.sum(self.dtable[range(M), codes], axis=1)
# The above line is equivalent to the following naive loop:
# dists = np.zeros((N, )).astype(np.float32)
# for n in range(N):
# for m in range(M):
# dists[n] += self.dtable[m][codes[n][m]]
return dists
|
[
"Given PQ-codes, compute Asymmetric Distances between the query (self.dtable)\n and the PQ-codes.\n\n Args:\n codes (np.ndarray): PQ codes with shape=(N, M) and\n dtype=pq.code_dtype where pq is a pq instance that creates the codes\n\n Returns:\n np.ndarray: Asymmetric Distances with shape=(N, ) and dtype=np.float32\n\n "
] |
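Putting dtable and adist together for an asymmetric-distance nearest neighbor scan (pq, X, and codes continue the sketch above):

query = np.random.random(128).astype(np.float32)
dtable = pq.dtable(query)        # (M, Ks) table of squared distances
dists = dtable.adist(codes)      # one approximate distance per database item
nearest = int(np.argmin(dists))  # index of the best match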
Please provide a description of the function:def fit(self, vecs, pq_iter=20, rotation_iter=10, seed=123):
assert vecs.dtype == np.float32
assert vecs.ndim == 2
_, D = vecs.shape
self.R = np.eye(D, dtype=np.float32)
for i in range(rotation_iter):
print("OPQ rotation training: {} / {}".format(i, rotation_iter))
X = vecs @ self.R
# (a) Train codewords
pq_tmp = PQ(M=self.M, Ks=self.Ks, verbose=self.verbose)
if i == rotation_iter - 1:
# In the final loop, run the full training
pq_tmp.fit(X, iter=pq_iter, seed=seed)
else:
# During the training for OPQ, just run one-pass (iter=1) PQ training
pq_tmp.fit(X, iter=1, seed=seed)
# (b) Update a rotation matrix R
X_ = pq_tmp.decode(pq_tmp.encode(X))
U, s, V = np.linalg.svd(vecs.T @ X_)
print("==== Reconstruction error:", np.linalg.norm(X - X_, 'fro'), "====")
if i == rotation_iter - 1:
self.pq = pq_tmp
break
else:
self.R = U @ V
return self
|
[
"Given training vectors, this function alternatively trains\n (a) codewords and (b) a rotation matrix.\n The procedure of training codewords is same as :func:`PQ.fit`.\n The rotation matrix is computed so as to minimize the quantization error\n given codewords (Orthogonal Procrustes problem)\n\n This function is a translation from the original MATLAB implementation to that of python\n http://kaiminghe.com/cvpr13/index.html\n\n If you find the error message is messy, please turn off the verbose flag, then\n you can see the reduction of error for each iteration clearly\n\n Args:\n vecs: (np.ndarray): Training vectors with shape=(N, D) and dtype=np.float32.\n pq_iter (int): The number of iteration for k-means\n rotation_iter (int): The number of iteration for leraning rotation\n seed (int): The seed for random process\n\n Returns:\n object: self\n\n "
] |
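A usage sketch, assuming an OPQ class exposing this fit together with encode/decode wrappers (nanopq provides such a class; the constructor arguments here are illustrative):

import numpy as np

vecs = np.random.rand(1000, 64).astype(np.float32)
opq = OPQ(M=8, Ks=256)
opq.fit(vecs, pq_iter=20, rotation_iter=10, seed=123)
codes = opq.encode(vecs)                    # rotate, then PQ-encode
recon = opq.decode(codes)                   # PQ-decode, then inverse-rotate
print(np.linalg.norm(vecs - recon, 'fro'))  # overall quantization error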
Please provide a description of the function:def rotate(self, vecs):
assert vecs.dtype == np.float32
assert vecs.ndim in [1, 2]
if vecs.ndim == 2:
return vecs @ self.R
elif vecs.ndim == 1:
return (vecs.reshape(1, -1) @ self.R).reshape(-1)
|
[
"Rotate input vector(s) by the rotation matrix.`\n\n Args:\n vecs (np.ndarray): Input vector(s) with dtype=np.float32.\n The shape can be a single vector (D, ) or several vectors (N, D)\n\n Returns:\n np.ndarray: Rotated vectors with the same shape and dtype to the input vecs.\n\n "
] |
Please provide a description of the function:def decode(self, codes):
# Because R is a rotation matrix (R^t * R = I), R^-1 should be R^t
return self.pq.decode(codes) @ self.R.T
|
[
"Given PQ-codes, reconstruct original D-dimensional vectors via :func:`PQ.decode`,\n and applying an inverse-rotation.\n\n Args:\n codes (np.ndarray): PQ-cdoes with shape=(N, M) and dtype=self.code_dtype.\n Each row is a PQ-code\n\n Returns:\n np.ndarray: Reconstructed vectors with shape=(N, D) and dtype=np.float32\n\n "
] |
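The comment in decode relies on R being orthogonal, so its inverse is its transpose. A quick numerical check of that identity, independent of any PQ code:

import numpy as np

A = np.random.rand(4, 4).astype(np.float32)
R, _ = np.linalg.qr(A)   # QR factorization yields an orthogonal factor
assert np.allclose(R @ R.T, np.eye(4), atol=1e-5)
assert np.allclose(R.T, np.linalg.inv(R), atol=1e-4)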
Please provide a description of the function:def transaction():
client = default_client()
_thread.client = client.pipeline()
try:
yield
_thread.client.execute()
finally:
_thread.client = client
|
[
"\n Swaps out the current client with a pipeline instance,\n so that each Redis method call inside the context will be\n pipelined. Once the context is exited, we execute the pipeline.\n "
] |
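A usage sketch, assuming transaction is decorated with @contextlib.contextmanager and that module-level commands such as lpush/incr dispatch through the thread-local client swapped out above (those helper names are assumptions, not shown here):

with transaction():
    lpush("queue", "job-1")   # buffered on the pipeline
    lpush("queue", "job-2")   # buffered
    incr("jobs_enqueued")     # buffered
# On exit the pipeline executed in one round trip and the
# original client was restored.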
Please provide a description of the function:def _get_lua_path(self, name):
parts = (os.path.dirname(os.path.abspath(__file__)), "lua", name)
return os.path.join(*parts)
|
[
"\n Joins the given name with the relative path of the module.\n "
] |
Please provide a description of the function:def _get_lua_funcs(self):
with open(self._get_lua_path("atoms.lua")) as f:
for func in f.read().strip().split("function "):
if func:
bits = func.split("\n", 1)
name = bits[0].split("(")[0].strip()
snippet = bits[1].rsplit("end", 1)[0].strip()
yield name, snippet
|
[
"\n Returns the name / code snippet pair for each Lua function\n in the atoms.lua file.\n "
] |
Please provide a description of the function:def _create_lua_method(self, name, code):
script = self.register_script(code)
setattr(script, "name", name) # Helps debugging redis lib.
method = lambda key, *a, **k: script(keys=[key], args=a, **k)
setattr(self, name, method)
|
[
"\n Registers the code snippet as a Lua script, and binds the\n script to the client as a method that can be called with\n the same signature as regular client methods, eg with a\n single key arg.\n "
] |
Please provide a description of the function:def value_left(self, other):
return other.value if isinstance(other, self.__class__) else other
|
[
"\n Returns the value of the other type instance to use in an\n operator method, namely when the method's instance is on the\n left side of the expression.\n "
] |
Please provide a description of the function:def value_right(self, other):
return self if isinstance(other, self.__class__) else self.value
|
[
"\n Returns the value of the type instance calling an to use in an\n operator method, namely when the method's instance is on the\n right side of the expression.\n "
] |
Please provide a description of the function:def op_left(op):
def method(self, other):
return op(self.value, value_left(self, other))
return method
|
[
"\n Returns a type instance method for the given operator, applied\n when the instance appears on the left side of the expression.\n "
] |
Please provide a description of the function:def op_right(op):
def method(self, other):
return op(value_left(self, other), value_right(self, other))
return method
|
[
"\n Returns a type instance method for the given operator, applied\n when the instance appears on the right side of the expression.\n "
] |
Please provide a description of the function:def inplace(method_name):
def method(self, other):
getattr(self, method_name)(value_left(self, other))
return self
return method
|
[
"\n Returns a type instance method that will call the given method\n name, used for inplace operators such as __iadd__ and __imul__.\n "
] |
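A minimal sketch of how these three helpers wire up a value-wrapping type; the Int class and its .value attribute are assumptions used for illustration:

import operator

class Int(object):
    def __init__(self, value=0):
        self.value = value

    __add__ = op_left(operator.add)     # Int(40) + 2
    __radd__ = op_right(operator.add)   # 2 + Int(40)

x = Int(40)
assert x + 2 == 42    # op_left: operator.add(x.value, 2)
assert 2 + x == 42    # op_right: operator.add(2, x.value)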
Please provide a description of the function:def on(self, event, f=None):
def _on(f):
self._add_event_handler(event, f, f)
return f
if f is None:
return _on
else:
return _on(f)
|
[
"Registers the function ``f`` to the event name ``event``.\n\n If ``f`` isn't provided, this method returns a function that\n takes ``f`` as a callback; in other words, you can use this method\n as a decorator, like so::\n\n @ee.on('data')\n def data_handler(data):\n print(data)\n\n In both the decorated and undecorated forms, the event handler is\n returned. The upshot of this is that you can call decorated handlers\n directly, as well as use them in remove_listener calls.\n "
] |
Please provide a description of the function:def emit(self, event, *args, **kwargs):
handled = False
for f in list(self._events[event].values()):
self._emit_run(f, args, kwargs)
handled = True
if not handled:
self._emit_handle_potential_error(event, args[0] if args else None)
return handled
|
[
"Emit ``event``, passing ``*args`` and ``**kwargs`` to each attached\n function. Returns ``True`` if any functions are attached to ``event``;\n otherwise returns ``False``.\n\n Example::\n\n ee.emit('data', '00101001')\n\n Assuming ``data`` is an attached function, this will call\n ``data('00101001')'``.\n "
] |
Please provide a description of the function:def once(self, event, f=None):
def _wrapper(f):
def g(*args, **kwargs):
self.remove_listener(event, f)
# f may return a coroutine, so we need to return that
# result here so that emit can schedule it
return f(*args, **kwargs)
self._add_event_handler(event, f, g)
return f
if f is None:
return _wrapper
else:
return _wrapper(f)
|
[
"The same as ``ee.on``, except that the listener is automatically\n removed after being called.\n "
] |
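A usage sketch, assuming these methods live on an EventEmitter class (the pyee-style API suggested by the surrounding code):

ee = EventEmitter()

@ee.once('ready')
def handler():
    print('fired exactly once')

ee.emit('ready')   # prints; the wrapper removes the listener first
ee.emit('ready')   # returns False: nothing is attached any more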
Please provide a description of the function:def remove_all_listeners(self, event=None):
if event is not None:
self._events[event] = OrderedDict()
else:
self._events = defaultdict(OrderedDict)
|
[
"Remove all listeners attached to ``event``.\n If ``event`` is ``None``, remove all listeners on all events.\n "
] |
Please provide a description of the function:def offsetcopy(s, newoffset):
assert 0 <= newoffset < 8
if not s.bitlength:
return copy.copy(s)
else:
if newoffset == s.offset % 8:
return ByteStore(s.getbyteslice(s.byteoffset, s.byteoffset + s.bytelength), s.bitlength, newoffset)
newdata = []
d = s._rawarray
assert newoffset != s.offset % 8
if newoffset < s.offset % 8:
# We need to shift everything left
shiftleft = s.offset % 8 - newoffset
# First deal with everything except for the final byte
for x in range(s.byteoffset, s.byteoffset + s.bytelength - 1):
newdata.append(((d[x] << shiftleft) & 0xff) +\
(d[x + 1] >> (8 - shiftleft)))
bits_in_last_byte = (s.offset + s.bitlength) % 8
if not bits_in_last_byte:
bits_in_last_byte = 8
if bits_in_last_byte > shiftleft:
newdata.append((d[s.byteoffset + s.bytelength - 1] << shiftleft) & 0xff)
else: # newoffset > s._offset % 8
shiftright = newoffset - s.offset % 8
newdata.append(s.getbyte(0) >> shiftright)
for x in range(s.byteoffset + 1, s.byteoffset + s.bytelength):
newdata.append(((d[x - 1] << (8 - shiftright)) & 0xff) +\
(d[x] >> shiftright))
bits_in_last_byte = (s.offset + s.bitlength) % 8
if not bits_in_last_byte:
bits_in_last_byte = 8
if bits_in_last_byte + shiftright > 8:
newdata.append((d[s.byteoffset + s.bytelength - 1] << (8 - shiftright)) & 0xff)
new_s = ByteStore(bytearray(newdata), s.bitlength, newoffset)
assert new_s.offset == newoffset
return new_s
|
[
"Return a copy of a ByteStore with the newoffset.\n\n Not part of public interface.\n "
] |
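A worked example of the shift-left branch: 10 payload bits stored at offset 3 span two bytes, and moving them to offset 0 shifts each byte left by 3 while pulling in the top 3 bits of its successor (plain-integer sketch, independent of ByteStore):

d = bytearray([0b00010110, 0b11010000])   # 10 payload bits start at bit 3
shiftleft = 3                             # old offset 3, new offset 0
first = ((d[0] << shiftleft) & 0xff) + (d[1] >> (8 - shiftleft))
# bits_in_last_byte = (3 + 10) % 8 == 5, which exceeds shiftleft,
# so a final partial byte is still needed:
last = (d[1] << shiftleft) & 0xff
assert format(first, '08b') == '10110110'
assert format(last, '08b') == '10000000'  # only the top 2 bits are payload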
Please provide a description of the function:def equal(a, b):
# We want to return False for inequality as soon as possible, which
# means we get lots of special cases.
# First the easy one - compare lengths:
a_bitlength = a.bitlength
b_bitlength = b.bitlength
if a_bitlength != b_bitlength:
return False
if not a_bitlength:
assert b_bitlength == 0
return True
# Make 'a' the one with the smaller offset
if (a.offset % 8) > (b.offset % 8):
a, b = b, a
# and create some aliases
a_bitoff = a.offset % 8
b_bitoff = b.offset % 8
a_byteoffset = a.byteoffset
b_byteoffset = b.byteoffset
a_bytelength = a.bytelength
b_bytelength = b.bytelength
da = a._rawarray
db = b._rawarray
# If they are pointing to the same data, they must be equal
if da is db and a.offset == b.offset:
return True
if a_bitoff == b_bitoff:
bits_spare_in_last_byte = 8 - (a_bitoff + a_bitlength) % 8
if bits_spare_in_last_byte == 8:
bits_spare_in_last_byte = 0
# Special case for a, b contained in a single byte
if a_bytelength == 1:
a_val = ((da[a_byteoffset] << a_bitoff) & 0xff) >> (8 - a_bitlength)
b_val = ((db[b_byteoffset] << b_bitoff) & 0xff) >> (8 - b_bitlength)
return a_val == b_val
# Otherwise check first byte
if da[a_byteoffset] & (0xff >> a_bitoff) != db[b_byteoffset] & (0xff >> b_bitoff):
return False
# then everything up to the last
b_a_offset = b_byteoffset - a_byteoffset
for x in range(1 + a_byteoffset, a_byteoffset + a_bytelength - 1):
if da[x] != db[b_a_offset + x]:
return False
# and finally the last byte
return (da[a_byteoffset + a_bytelength - 1] >> bits_spare_in_last_byte ==
db[b_byteoffset + b_bytelength - 1] >> bits_spare_in_last_byte)
assert a_bitoff != b_bitoff
# This is how much we need to shift a to the right to compare with b:
shift = b_bitoff - a_bitoff
# Special case for b only one byte long
if b_bytelength == 1:
assert a_bytelength == 1
a_val = ((da[a_byteoffset] << a_bitoff) & 0xff) >> (8 - a_bitlength)
b_val = ((db[b_byteoffset] << b_bitoff) & 0xff) >> (8 - b_bitlength)
return a_val == b_val
# Special case for a only one byte long
if a_bytelength == 1:
assert b_bytelength == 2
a_val = ((da[a_byteoffset] << a_bitoff) & 0xff) >> (8 - a_bitlength)
b_val = ((db[b_byteoffset] << 8) + db[b_byteoffset + 1]) << b_bitoff
b_val &= 0xffff
b_val >>= 16 - b_bitlength
return a_val == b_val
# Compare first byte of b with bits from first byte of a
if (da[a_byteoffset] & (0xff >> a_bitoff)) >> shift != db[b_byteoffset] & (0xff >> b_bitoff):
return False
# Now compare every full byte of b with bits from 2 bytes of a
for x in range(1, b_bytelength - 1):
# Construct byte from 2 bytes in a to compare to byte in b
b_val = db[b_byteoffset + x]
a_val = ((da[a_byteoffset + x - 1] << 8) + da[a_byteoffset + x]) >> shift
a_val &= 0xff
if a_val != b_val:
return False
# Now check bits in final byte of b
final_b_bits = (b.offset + b_bitlength) % 8
if not final_b_bits:
final_b_bits = 8
b_val = db[b_byteoffset + b_bytelength - 1] >> (8 - final_b_bits)
final_a_bits = (a.offset + a_bitlength) % 8
if not final_a_bits:
final_a_bits = 8
if b.bytelength > a_bytelength:
assert b_bytelength == a_bytelength + 1
a_val = da[a_byteoffset + a_bytelength - 1] >> (8 - final_a_bits)
a_val &= 0xff >> (8 - final_b_bits)
return a_val == b_val
assert a_bytelength == b_bytelength
a_val = da[a_byteoffset + a_bytelength - 2] << 8
a_val += da[a_byteoffset + a_bytelength - 1]
a_val >>= (8 - final_a_bits)
a_val &= 0xff >> (8 - final_b_bits)
return a_val == b_val
|
[
"Return True if ByteStores a == b.\n\n Not part of public interface.\n "
] |
Please provide a description of the function:def structparser(token):
m = STRUCT_PACK_RE.match(token)
if not m:
return [token]
else:
endian = m.group('endian')
if endian is None:
return [token]
# Split the format string into a list of 'q', '4h' etc.
formatlist = re.findall(STRUCT_SPLIT_RE, m.group('fmt'))
# Now deal with multiplicative factors, 4h -> hhhh etc.
fmt = ''.join([f[-1] * int(f[:-1]) if len(f) != 1 else
f for f in formatlist])
if endian == '@':
# Native endianness
if byteorder == 'little':
endian = '<'
else:
assert byteorder == 'big'
endian = '>'
if endian == '<':
tokens = [REPLACEMENTS_LE[c] for c in fmt]
else:
assert endian == '>'
tokens = [REPLACEMENTS_BE[c] for c in fmt]
return tokens
|
[
"Parse struct-like format string token into sub-token list."
] |
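Illustrative calls (the exact replacement strings come from the module's REPLACEMENTS_LE / REPLACEMENTS_BE tables, so treat the outputs below as assumptions):

structparser('>2h')     # e.g. ['intbe:16', 'intbe:16']
structparser('<HH')     # e.g. ['uintle:16', 'uintle:16']
structparser('uint:8')  # no struct-style match: returned as ['uint:8']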
Please provide a description of the function:def tokenparser(fmt, keys=None, token_cache={}):
try:
return token_cache[(fmt, keys)]
except KeyError:
token_key = (fmt, keys)
# Very inefficient expanding of brackets.
fmt = expand_brackets(fmt)
# Split tokens by ',' and remove whitespace
# The meta_tokens can either be ordinary single tokens or multiple
# struct-format token strings.
meta_tokens = (''.join(f.split()) for f in fmt.split(','))
return_values = []
stretchy_token = False
for meta_token in meta_tokens:
# See if it has a multiplicative factor
m = MULTIPLICATIVE_RE.match(meta_token)
if not m:
factor = 1
else:
factor = int(m.group('factor'))
meta_token = m.group('token')
# See if it's a struct-like format
tokens = structparser(meta_token)
ret_vals = []
for token in tokens:
if keys and token in keys:
# Don't bother parsing it, it's a keyword argument
ret_vals.append([token, None, None])
continue
value = length = None
if token == '':
continue
# Match literal tokens of the form 0x... 0o... and 0b...
m = LITERAL_RE.match(token)
if m:
name = m.group('name')
value = m.group('value')
ret_vals.append([name, length, value])
continue
# Match everything else:
m1 = TOKEN_RE.match(token)
if not m1:
# and if you don't specify a 'name' then the default is 'uint':
m2 = DEFAULT_UINT.match(token)
if not m2:
raise ValueError("Don't understand token '{0}'.".format(token))
if m1:
name = m1.group('name')
length = m1.group('len')
if m1.group('value'):
value = m1.group('value')
else:
assert m2
name = 'uint'
length = m2.group('len')
if m2.group('value'):
value = m2.group('value')
if name == 'bool':
if length is not None:
raise ValueError("You can't specify a length with bool tokens - they are always one bit.")
length = 1
if length is None and name not in ('se', 'ue', 'sie', 'uie'):
stretchy_token = True
if length is not None:
# Try converting length to int, otherwise check it's a key.
try:
length = int(length)
if length < 0:
raise Error
# For the 'bytes' token convert length to bits.
if name == 'bytes':
length *= 8
except Error:
raise ValueError("Can't read a token with a negative length.")
except ValueError:
if not keys or length not in keys:
raise ValueError("Don't understand length '{0}' of token.".format(length))
ret_vals.append([name, length, value])
# This multiplies by the multiplicative factor, but this means that
# we can't allow keyword values as multipliers (e.g. n*uint:8).
# The only way to do this would be to return the factor in some fashion
# (we can't use the key's value here as it would mean that we couldn't
# sensibly continue to cache the function's results). (TODO)
return_values.extend(ret_vals * factor)
return_values = [tuple(x) for x in return_values]
if len(token_cache) < CACHE_SIZE:
token_cache[token_key] = stretchy_token, return_values
return stretchy_token, return_values
|
[
"Divide the format string into tokens and parse them.\n\n Return stretchy token and list of [initialiser, length, value]\n initialiser is one of: hex, oct, bin, uint, int, se, ue, 0x, 0o, 0b etc.\n length is None if not known, as is value.\n\n If the token is in the keyword dictionary (keys) then it counts as a\n special case and isn't messed with.\n\n tokens must be of the form: [factor*][initialiser][:][length][=value]\n\n "
] |
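A sketch of the return value for a simple format string (tuple contents follow the (name, length, value) layout built above):

stretchy, tokens = tokenparser('uint:12, hex:8, bin')
# stretchy is True because the trailing 'bin' token has no length.
# tokens == [('uint', 12, None), ('hex', 8, None), ('bin', None, None)]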
Please provide a description of the function:def expand_brackets(s):
s = ''.join(s.split())
while True:
start = s.find('(')
if start == -1:
break
count = 1 # Number of hanging open brackets
p = start + 1
while p < len(s):
if s[p] == '(':
count += 1
if s[p] == ')':
count -= 1
if not count:
break
p += 1
if count:
raise ValueError("Unbalanced parenthesis in '{0}'.".format(s))
if start == 0 or s[start - 1] != '*':
s = s[0:start] + s[start + 1:p] + s[p + 1:]
else:
m = BRACKET_RE.search(s)
if m:
factor = int(m.group('factor'))
matchstart = m.start('factor')
s = s[0:matchstart] + (factor - 1) * (s[start + 1:p] + ',') + s[start + 1:p] + s[p + 1:]
else:
raise ValueError("Failed to parse '{0}'.".format(s))
return s
|
[
"Remove whitespace and expand all brackets."
] |
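Worked examples: brackets without a preceding factor are simply dropped, while an n*(...) factor repeats the bracketed contents n times (assuming BRACKET_RE matches the integer factor before the bracket):

expand_brackets('(uint:8, bin:3)')
# -> 'uint:8,bin:3'
expand_brackets('2*(uint:8, bin:3)')
# -> 'uint:8,bin:3,uint:8,bin:3'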
Please provide a description of the function:def pack(fmt, *values, **kwargs):
tokens = []
if isinstance(fmt, basestring):
fmt = [fmt]
try:
for f_item in fmt:
_, tkns = tokenparser(f_item, tuple(sorted(kwargs.keys())))
tokens.extend(tkns)
except ValueError as e:
raise CreationError(*e.args)
value_iter = iter(values)
s = BitStream()
try:
for name, length, value in tokens:
# If the value is in the kwd dictionary then it takes precedence.
if value in kwargs:
value = kwargs[value]
# If the length is in the kwd dictionary then use that too.
if length in kwargs:
length = kwargs[length]
# Also if we just have a dictionary name then we want to use it
if name in kwargs and length is None and value is None:
s.append(kwargs[name])
continue
if length is not None:
length = int(length)
if value is None and name != 'pad':
# Take the next value from the ones provided
value = next(value_iter)
s._append(BitStream._init_with_token(name, length, value))
except StopIteration:
raise CreationError("Not enough parameters present to pack according to the "
"format. {0} values are needed.", len(tokens))
try:
next(value_iter)
except StopIteration:
# Good, we've used up all the *values.
return s
raise CreationError("Too many parameters present to pack according to the format.")
|
[
"Pack the values according to the format string and return a new BitStream.\n\n fmt -- A single string or a list of strings with comma separated tokens\n describing how to create the BitStream.\n values -- Zero or more values to pack according to the format.\n kwargs -- A dictionary or keyword-value pairs - the keywords used in the\n format string will be replaced with their given value.\n\n Token examples: 'int:12' : 12 bits as a signed integer\n 'uint:8' : 8 bits as an unsigned integer\n 'float:64' : 8 bytes as a big-endian float\n 'intbe:16' : 2 bytes as a big-endian signed integer\n 'uintbe:16' : 2 bytes as a big-endian unsigned integer\n 'intle:32' : 4 bytes as a little-endian signed integer\n 'uintle:32' : 4 bytes as a little-endian unsigned integer\n 'floatle:64': 8 bytes as a little-endian float\n 'intne:24' : 3 bytes as a native-endian signed integer\n 'uintne:24' : 3 bytes as a native-endian unsigned integer\n 'floatne:32': 4 bytes as a native-endian float\n 'hex:80' : 80 bits as a hex string\n 'oct:9' : 9 bits as an octal string\n 'bin:1' : single bit binary string\n 'ue' / 'uie': next bits as unsigned exp-Golomb code\n 'se' / 'sie': next bits as signed exp-Golomb code\n 'bits:5' : 5 bits as a bitstring object\n 'bytes:10' : 10 bytes as a bytes object\n 'bool' : 1 bit as a bool\n 'pad:3' : 3 zero bits as padding\n\n >>> s = pack('uint:12, bits', 100, '0xffe')\n >>> t = pack(['bits', 'bin:3'], s, '111')\n >>> u = pack('uint:8=a, uint:8=b, uint:55=a', a=6, b=44)\n\n "
] |
Please provide a description of the function:def getbyteslice(self, start, end):
c = self._rawarray[start:end]
return c
|
[
"Direct access to byte data."
] |
Please provide a description of the function:def _appendstore(self, store):
if not store.bitlength:
return
# Set new array offset to the number of bits in the final byte of current array.
store = offsetcopy(store, (self.offset + self.bitlength) % 8)
if store.offset:
# first do the byte with the join.
joinval = (self._rawarray.pop() & (255 ^ (255 >> store.offset)) |
(store.getbyte(0) & (255 >> store.offset)))
self._rawarray.append(joinval)
self._rawarray.extend(store._rawarray[1:])
else:
self._rawarray.extend(store._rawarray)
self.bitlength += store.bitlength
|
[
"Join another store on to the end of this one."
] |
Please provide a description of the function:def _prependstore(self, store):
if not store.bitlength:
return
# Set the offset of a copy of store so that its final byte
# ends in a position that matches the offset of self,
# then join self on to the end of it.
store = offsetcopy(store, (self.offset - store.bitlength) % 8)
assert (store.offset + store.bitlength) % 8 == self.offset % 8
bit_offset = self.offset % 8
if bit_offset:
# first do the byte with the join.
store.setbyte(-1, (store.getbyte(-1) & (255 ^ (255 >> bit_offset)) | \
(self._rawarray[self.byteoffset] & (255 >> bit_offset))))
store._rawarray.extend(self._rawarray[self.byteoffset + 1: self.byteoffset + self.bytelength])
else:
store._rawarray.extend(self._rawarray[self.byteoffset: self.byteoffset + self.bytelength])
self._rawarray = store._rawarray
self.offset = store.offset
self.bitlength += store.bitlength
|
[
"Join another store on to the start of this one."
] |
Please provide a description of the function:def _assertsanity(self):
assert self.len >= 0
assert 0 <= self._offset, "offset={0}".format(self._offset)
assert (self.len + self._offset + 7) // 8 == self._datastore.bytelength + self._datastore.byteoffset
return True
|
[
"Check internal self consistency as a debugging aid."
] |
Please provide a description of the function:def _setauto(self, s, length, offset):
# As s can be so many different things it's important to do the checks
# in the correct order, as some types are also other allowed types.
# So basestring must be checked before Iterable
# and bytes/bytearray before Iterable but after basestring!
if isinstance(s, Bits):
if length is None:
length = s.len - offset
self._setbytes_unsafe(s._datastore.rawbytes, length, s._offset + offset)
return
if isinstance(s, file):
if offset is None:
offset = 0
if length is None:
length = os.path.getsize(s.name) * 8 - offset
byteoffset, offset = divmod(offset, 8)
bytelength = (length + byteoffset * 8 + offset + 7) // 8 - byteoffset
m = MmapByteArray(s, bytelength, byteoffset)
if length + byteoffset * 8 + offset > m.filelength * 8:
raise CreationError("File is not long enough for specified "
"length and offset.")
self._datastore = ConstByteStore(m, length, offset)
return
if length is not None:
raise CreationError("The length keyword isn't applicable to this initialiser.")
if offset:
raise CreationError("The offset keyword isn't applicable to this initialiser.")
if isinstance(s, basestring):
bs = self._converttobitstring(s)
assert bs._offset == 0
self._setbytes_unsafe(bs._datastore.rawbytes, bs.length, 0)
return
if isinstance(s, (bytes, bytearray)):
self._setbytes_unsafe(bytearray(s), len(s) * 8, 0)
return
if isinstance(s, array.array):
b = s.tostring()
self._setbytes_unsafe(bytearray(b), len(b) * 8, 0)
return
if isinstance(s, numbers.Integral):
# Initialise with s zero bits.
if s < 0:
msg = "Can't create bitstring of negative length {0}."
raise CreationError(msg, s)
data = bytearray((s + 7) // 8)
self._datastore = ByteStore(data, s, 0)
return
if isinstance(s, collections.Iterable):
# Evaluate each item as True or False and set bits to 1 or 0.
self._setbin_unsafe(''.join(str(int(bool(x))) for x in s))
return
raise TypeError("Cannot initialise bitstring from {0}.".format(type(s)))
|
[
"Set bitstring from a bitstring, file, bool, integer, array, iterable or string."
] |
Please provide a description of the function:def _setfile(self, filename, length, offset):
source = open(filename, 'rb')
if offset is None:
offset = 0
if length is None:
length = os.path.getsize(source.name) * 8 - offset
byteoffset, offset = divmod(offset, 8)
bytelength = (length + byteoffset * 8 + offset + 7) // 8 - byteoffset
m = MmapByteArray(source, bytelength, byteoffset)
if length + byteoffset * 8 + offset > m.filelength * 8:
raise CreationError("File is not long enough for specified "
"length and offset.")
self._datastore = ConstByteStore(m, length, offset)
|
[
"Use file as source of bits."
] |
Please provide a description of the function:def _setbytes_safe(self, data, length=None, offset=0):
data = bytearray(data)
if length is None:
# Use up to the end of the data
length = len(data)*8 - offset
self._datastore = ByteStore(data, length, offset)
else:
if length + offset > len(data) * 8:
msg = "Not enough data present. Need {0} bits, have {1}."
raise CreationError(msg, length + offset, len(data) * 8)
if length == 0:
self._datastore = ByteStore(bytearray(0))
else:
self._datastore = ByteStore(data, length, offset)
|
[
"Set the data from a string."
] |
Please provide a description of the function:def _setbytes_unsafe(self, data, length, offset):
self._datastore = ByteStore(data[:], length, offset)
assert self._assertsanity()
|
[
"Unchecked version of _setbytes_safe."
] |
Please provide a description of the function:def _readbytes(self, length, start):
assert length % 8 == 0
assert start + length <= self.len
if not (start + self._offset) % 8:
return bytes(self._datastore.getbyteslice((start + self._offset) // 8,
(start + self._offset + length) // 8))
return self._slice(start, start + length).tobytes()
|
[
"Read bytes and return them. Note that length is in bits."
] |
Please provide a description of the function:def _setuint(self, uint, length=None):
try:
if length is None:
# Use the whole length. Deliberately not using .len here.
length = self._datastore.bitlength
except AttributeError:
# bitstring doesn't have a _datastore as it hasn't been created!
pass
# TODO: All this checking code should be hoisted out of here!
if length is None or length == 0:
raise CreationError("A non-zero length must be specified with a "
"uint initialiser.")
if uint >= (1 << length):
msg = "{0} is too large an unsigned integer for a bitstring of length {1}. "\
"The allowed range is [0, {2}]."
raise CreationError(msg, uint, length, (1 << length) - 1)
if uint < 0:
raise CreationError("uint cannot be initialsed by a negative number.")
s = hex(uint)[2:]
s = s.rstrip('L')
if len(s) & 1:
s = '0' + s
try:
data = bytes.fromhex(s)
except AttributeError:
# the Python 2.x way
data = binascii.unhexlify(s)
# Now add bytes as needed to get the right length.
extrabytes = ((length + 7) // 8) - len(data)
if extrabytes > 0:
data = b'\x00' * extrabytes + data
offset = 8 - (length % 8)
if offset == 8:
offset = 0
self._setbytes_unsafe(bytearray(data), length, offset)
|
[
"Reset the bitstring to have given unsigned int interpretation."
] |
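A worked example of the padding and offset arithmetic above, for uint 5 stored in 12 bits (plain-Python sketch):

length, uint = 12, 5
s = hex(uint)[2:]                              # '5'
s = '0' + s                                    # even-length hex -> '05'
data = bytes.fromhex(s)                        # b'\x05', one byte
extrabytes = ((length + 7) // 8) - len(data)   # 2 - 1 = 1
data = b'\x00' * extrabytes + data             # b'\x00\x05', two bytes
offset = 8 - (length % 8)                      # 4: the value is right-aligned,
# so bits 4..15 of data hold the 12-bit value 000000000101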