Source file: lib/sqlalchemy/orm/query.py
Repository: slafs/sqlalchemy
Language: Python

Selected methods of the SQLAlchemy ORM ``Query`` class, reproduced with
their docstrings.

def label(self, name):
    """Return the full SELECT statement represented by this
    :class:`.Query`, converted to a scalar subquery with a label of the
    given name.

    Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`.

    .. versionadded:: 0.6.5

    """
    return self.enable_eagerloads(False).statement.label(name)

def as_scalar(self):
    """Return the full SELECT statement represented by this
    :class:`.Query`, converted to a scalar subquery.

    Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.as_scalar`.

    .. versionadded:: 0.6.5

    """
    return self.enable_eagerloads(False).statement.as_scalar()

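# Usage sketch (editorial, not part of query.py): a correlated scalar
# subquery built with Query.label().  The User/Address mapping, the
# in-memory SQLite engine, and the session below are illustrative
# assumptions made for these sketches, not part of the source above.
from sqlalchemy import (create_engine, func,
                        Column, Integer, String, ForeignKey)
from sqlalchemy.orm import Session, relationship
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String(50))
    addresses = relationship('Address', backref='user')

class Address(Base):
    __tablename__ = 'addresses'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('users.id'))
    email = Column(String(100))

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = Session(engine)

# Each user's address count as a labeled scalar subquery; the inner
# query correlates on Address.user_id == User.id.
address_count = session.query(func.count(Address.id)).\
    filter(Address.user_id == User.id).\
    label('address_count')
for name, count in session.query(User.name, address_count):
    print(name, count)
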
@property
def selectable(self):
    """Return the :class:`.Select` object emitted by this :class:`.Query`.

    Used for :func:`.inspect` compatibility, this is equivalent to::

        query.enable_eagerloads(False).with_labels().statement

    """
    return self.__clause_element__()

@_generative()
def enable_eagerloads(self, value):
    """Control whether or not eager joins and subqueries are rendered.

    When set to False, the returned Query will not render eager joins
    regardless of :func:`~sqlalchemy.orm.joinedload`,
    :func:`~sqlalchemy.orm.subqueryload` options or mapper-level
    ``lazy='joined'``/``lazy='subquery'`` configurations.

    This is used primarily when nesting the Query's statement into a
    subquery or other selectable, or when using
    :meth:`.Query.yield_per`.

    """
    self._enable_eagerloads = value

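# Usage sketch (illustrative): when nesting a Query's statement as a
# subquery, disable eager loads so that any mapper-level lazy='joined'
# relationships don't render their JOINs into the inner SELECT.
inner = session.query(User).enable_eagerloads(False).statement.alias()
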
@_generative()
def with_labels(self):
    """Apply column labels to the return value of Query.statement.

    Indicates that this Query's `statement` accessor should return a
    SELECT statement that applies labels to all columns in the form
    <tablename>_<columnname>; this is commonly used to disambiguate
    columns from multiple tables which have the same name.

    When the `Query` actually issues SQL to load rows, it always uses
    column labeling.

    """
    self._with_labels = True

@_generative()
def enable_assertions(self, value):
    """Control whether assertions are generated.

    When set to False, the returned Query will not assert its state
    before certain operations, including that LIMIT/OFFSET has not
    been applied when filter() is called, no criterion exists when
    get() is called, and no "from_statement()" exists when
    filter()/order_by()/group_by() etc. is called.  This more
    permissive mode is used by custom Query subclasses to specify
    criterion or other modifiers outside of the usual usage patterns.

    Care should be taken to ensure that the usage pattern is even
    possible.  A statement applied by from_statement() will override
    any criterion set by filter() or order_by(), for example.

    """
    self._enable_assertions = value

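# Usage sketch (illustrative): a Query subclass that pre-applies a
# criterion but still wants get() to work; enable_assertions(False)
# relaxes the "no criterion when get() is called" check.  The
# NamedOnlyQuery class and its usage here are assumptions for this
# example, in the spirit of the "pre-filtered query" recipe.
class NamedOnlyQuery(Query):
    def named_only(self):
        return self.enable_assertions(False).filter(User.name != None)

user = NamedOnlyQuery([User], session=session).named_only().get(1)
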
@property
def whereclause(self):
    """A readonly attribute which returns the current WHERE criterion
    for this Query.

    This returned value is a SQL expression construct, or ``None`` if
    no criterion has been established.

    """
    return self._criterion

@_generative()
def _with_current_path(self, path):
    """indicate that this query applies to objects loaded within a
    certain path.

    Used by deferred loaders (see strategies.py) which transfer query
    options from an originating query to a newly generated query
    intended for the deferred load.

    """
    self._current_path = path

@_generative(_no_clauseelement_condition)
def with_polymorphic(self, cls_or_mappers, selectable=None,
                     polymorphic_on=None):
    """Load columns for inheriting classes.

    :meth:`.Query.with_polymorphic` applies transformations to the
    "main" mapped class represented by this :class:`.Query`.  The
    "main" mapped class here means the :class:`.Query` object's first
    argument is a full class, i.e. ``session.query(SomeClass)``.
    These transformations allow additional tables to be present in the
    FROM clause so that columns for a joined-inheritance subclass are
    available in the query, both for the purposes of load-time
    efficiency as well as the ability to use these columns at query
    time.

    See the documentation section :ref:`with_polymorphic` for details
    on how this method is used.

    .. versionchanged:: 0.8
        A new and more flexible function :func:`.orm.with_polymorphic`
        supersedes :meth:`.Query.with_polymorphic`, as it can apply
        the equivalent functionality to any set of columns or classes
        in the :class:`.Query`, not just the "zero mapper".  See that
        function for a description of arguments.

    """
    if not self._primary_entity:
        raise sa_exc.InvalidRequestError(
            "No primary mapper set up for this Query.")
    entity = self._entities[0]._clone()
    self._entities = [entity] + self._entities[1:]
    entity.set_with_polymorphic(self, cls_or_mappers,
                                selectable=selectable,
                                polymorphic_on=polymorphic_on)

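# Usage sketch (illustrative): joined-table inheritance; the Person and
# Engineer classes below are assumptions made for this example.
class Person(Base):
    __tablename__ = 'people'
    id = Column(Integer, primary_key=True)
    kind = Column(String(20))
    __mapper_args__ = {'polymorphic_on': kind,
                       'polymorphic_identity': 'person'}

class Engineer(Person):
    __tablename__ = 'engineers'
    id = Column(Integer, ForeignKey('people.id'), primary_key=True)
    engineer_info = Column(String(50))
    __mapper_args__ = {'polymorphic_identity': 'engineer'}

Base.metadata.create_all(engine)

# Pull Engineer's table into the FROM clause so its columns load up
# front and can be filtered on from the base Person query:
q = session.query(Person).with_polymorphic(Engineer).\
    filter(Engineer.engineer_info == 'knows SQL')
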
@_generative()
def yield_per(self, count):
    """Yield only ``count`` rows at a time.

    The purpose of this method is to batch results in sub-collections
    and yield them out partially when fetching very large result sets
    (> 10K rows), so that the Python interpreter doesn't need to
    declare very large areas of memory, which is both time consuming
    and leads to excessive memory use.  The performance from fetching
    hundreds of thousands of rows can often double when a suitable
    yield-per setting (e.g. approximately 1000) is used, even with
    DBAPIs that buffer rows (which are most).

    The :meth:`.Query.yield_per` method **is not compatible with most
    eager loading schemes, including subqueryload and joinedload with
    collections**.  For this reason, it may be helpful to disable
    eager loads, either unconditionally with
    :meth:`.Query.enable_eagerloads`::

        q = sess.query(Object).yield_per(100).enable_eagerloads(False)

    Or more selectively using :func:`.lazyload`, such as with an
    asterisk to specify the default loader scheme::

        q = sess.query(Object).yield_per(100).\\
            options(lazyload('*'), joinedload(Object.some_related))

    .. warning::

        Use this method with caution; if the same instance is present
        in more than one batch of rows, end-user changes to attributes
        will be overwritten.

        In particular, it's usually impossible to use this setting
        with eagerly loaded collections (i.e. any lazy='joined' or
        'subquery') since those collections will be cleared for a new
        load when encountered in a subsequent result batch.  In the
        case of 'subquery' loading, the full result for all rows is
        fetched, which generally defeats the purpose of
        :meth:`~sqlalchemy.orm.query.Query.yield_per`.

    Also note that while :meth:`~sqlalchemy.orm.query.Query.yield_per`
    will set the ``stream_results`` execution option to True,
    currently this is only understood by the
    :mod:`~sqlalchemy.dialects.postgresql.psycopg2` dialect, which
    will stream results using server side cursors instead of
    pre-buffering all rows for this query.  Other DBAPIs **pre-buffer
    all rows** before making them available.  The memory use of raw
    database rows is much less than that of an ORM-mapped object, but
    should still be taken into consideration when benchmarking.

    .. seealso::

        :meth:`.Query.enable_eagerloads`

    """
    self._yield_per = count
    self._execution_options = self._execution_options.union(
        {'stream_results': True})

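# Usage sketch (illustrative): batch a large scan 1000 rows at a time,
# disabling eager loads as the docstring advises.
for user in session.query(User).yield_per(1000).\
        enable_eagerloads(False):
    pass  # process each User here
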
def get(self, ident):
    """Return an instance based on the given primary key identifier,
    or ``None`` if not found.

    E.g.::

        my_user = session.query(User).get(5)

        some_object = session.query(VersionedFoo).get((5, 10))

    :meth:`~.Query.get` is special in that it provides direct access
    to the identity map of the owning :class:`.Session`.  If the given
    primary key identifier is present in the local identity map, the
    object is returned directly from this collection and no SQL is
    emitted, unless the object has been marked fully expired.  If not
    present, a SELECT is performed in order to locate the object.

    :meth:`~.Query.get` also will perform a check if the object is
    present in the identity map and marked as expired - a SELECT is
    emitted to refresh the object as well as to ensure that the row is
    still present.  If not,
    :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.

    :meth:`~.Query.get` is only used to return a single mapped
    instance, not multiple instances or individual column constructs,
    and strictly on a single primary key value.  The originating
    :class:`.Query` must be constructed in this way, i.e. against a
    single mapped entity, with no additional filtering criterion.
    Loading options via :meth:`~.Query.options` may be applied
    however, and will be used if the object is not yet locally
    present.

    A lazy-loading, many-to-one attribute configured by
    :func:`.relationship`, using a simple foreign-key-to-primary-key
    criterion, will also use an operation equivalent to
    :meth:`~.Query.get` in order to retrieve the target value from the
    local identity map before querying the database.  See
    :doc:`/orm/loading` for further details on relationship loading.

    :param ident: A scalar or tuple value representing the primary
        key.  For a composite primary key, the order of identifiers
        corresponds in most cases to that of the mapped
        :class:`.Table` object's primary key columns.  For a
        :func:`.mapper` that was given the ``primary_key`` argument
        during construction, the order of identifiers corresponds to
        the elements present in this collection.

    :return: The object instance, or ``None``.

    """
    if hasattr(ident, '__composite_values__'):
        ident = ident.__composite_values__()
    ident = util.to_list(ident)
    mapper = self._only_full_mapper_zero('get')
    if len(ident) != len(mapper.primary_key):
        raise sa_exc.InvalidRequestError(
            "Incorrect number of values in identifier to formulate "
            "primary key for query.get(); primary key columns are %s" %
            ','.join("'%s'" % c for c in mapper.primary_key))
    key = mapper.identity_key_from_primary_key(ident)
    if not self._populate_existing and \
            not mapper.always_refresh and \
            self._for_update_arg is None:
        instance = loading.get_from_identity(
            self.session, key, attributes.PASSIVE_OFF)
        if instance is not None:
            self._get_existing_condition()
            # an identity-map hit whose class doesn't match the
            # requested entity is treated as not found
            if not issubclass(instance.__class__, mapper.class_):
                return None
            return instance
    return loading.load_on_ident(self, key)

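# Usage sketch (illustrative): a second get() for the same identifier
# is served from the Session's identity map without emitting SQL, as
# long as the object hasn't been expired.
u1 = session.query(User).get(5)
u2 = session.query(User).get(5)
assert u1 is u2
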
@_generative()
def correlate(self, *args):
    """Return a :class:`.Query` construct which will correlate the
    given FROM clauses to that of an enclosing :class:`.Query` or
    :func:`~.expression.select`.

    The method here accepts mapped classes, :func:`.aliased`
    constructs, and :func:`.mapper` constructs as arguments, which are
    resolved into expression constructs, in addition to appropriate
    expression constructs.

    The correlation arguments are ultimately passed to
    :meth:`.Select.correlate` after coercion to expression constructs.

    The correlation arguments take effect in such cases as when
    :meth:`.Query.from_self` is used, or when a subquery as returned
    by :meth:`.Query.subquery` is embedded in another
    :func:`~.expression.select` construct.

    """
    self._correlate = self._correlate.union(
        _interpret_as_from(s) if s is not None else None
        for s in args)

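# Usage sketch (illustrative): auto-correlation usually suffices for a
# scalar subquery; correlate() names the enclosing entity explicitly,
# which matters once the subquery is embedded in further select()
# constructs.
newest_email = session.query(Address.email).\
    filter(Address.user_id == User.id).\
    correlate(User).\
    order_by(Address.id.desc()).\
    limit(1).\
    as_scalar()
q = session.query(User.name, newest_email)
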
@_generative()
def autoflush(self, setting):
    """Return a Query with a specific 'autoflush' setting.

    Note that a Session with autoflush=False will not autoflush, even
    if this flag is set to True at the Query level.  Therefore this
    flag is usually used only to disable autoflush for a specific
    Query.

    """
    self._autoflush = setting

@_generative()
def populate_existing(self):
    """Return a :class:`.Query` that will expire and refresh all
    instances as they are loaded, or reused from the current
    :class:`.Session`.

    :meth:`.populate_existing` does not improve behavior when the ORM
    is used normally - the :class:`.Session` object's usual behavior
    of maintaining a transaction and expiring all attributes after
    rollback or commit handles object state automatically.  This
    method is not intended for general use.

    """
    self._populate_existing = True

@_generative()
def _with_invoke_all_eagers(self, value):
    """Set the 'invoke all eagers' flag which causes joined- and
    subquery loaders to traverse into already-loaded related objects
    and collections.

    Default is that of :attr:`.Query._invoke_all_eagers`.

    """
    self._invoke_all_eagers = value

def with_parent(self, instance, property=None):
    """Add filtering criterion that relates the given instance to a
    child object or collection, using its attribute state as well as
    an established :func:`.relationship()` configuration.

    The method uses the :func:`.with_parent` function to generate the
    clause, the result of which is passed to :meth:`.Query.filter`.

    Parameters are the same as :func:`.with_parent`, with the
    exception that the given property can be None, in which case a
    search is performed against this :class:`.Query` object's target
    mapper.

    """
    if property is None:
        mapper = object_mapper(instance)
        for prop in mapper.iterate_properties:
            if isinstance(prop, properties.RelationshipProperty) and \
                    prop.mapper is self._mapper_zero():
                property = prop
                break
        else:
            raise sa_exc.InvalidRequestError(
                "Could not locate a property which relates instances "
                "of class '%s' to instances of class '%s'" %
                (self._mapper_zero().class_.__name__,
                 instance.__class__.__name__))
    return self.filter(with_parent(instance, property))

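# Usage sketch (illustrative): select the Address rows belonging to an
# already-loaded User via the established User.addresses relationship.
someuser = session.query(User).first()
if someuser is not None:
    addrs = session.query(Address).\
        with_parent(someuser, User.addresses).all()
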
@_generative()
def add_entity(self, entity, alias=None):
    """add a mapped entity to the list of result columns
    to be returned."""
    if alias is not None:
        entity = aliased(entity, alias)
    self._entities = list(self._entities)
    m = _MapperEntity(self, entity)
    self._set_entity_selectables([m])

@_generative()
def with_session(self, session):
    """Return a :class:`.Query` that will use the given
    :class:`.Session`.

    """
    self.session = session

def from_self(self, *entities):
    """return a Query that selects from this Query's SELECT statement.

    \*entities - optional list of entities which will replace those
    being selected.

    """
    fromclause = self.with_labels().enable_eagerloads(False).\
        statement.correlate(None)
    q = self._from_selectable(fromclause)
    q._enable_single_crit = False
    if entities:
        q._set_entities(entities)
    return q

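# Usage sketch (illustrative): constrain to five users first, then join
# the limited set against Address rows.
q = session.query(User).filter(User.name.like('e%')).\
    limit(5).from_self().\
    join(User.addresses).filter(Address.email.like('q%'))
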
def values(self, *columns):
    """Return an iterator yielding result tuples corresponding to the
    given list of columns"""
    if not columns:
        return iter(())
    q = self._clone()
    q._set_entities(columns, entity_wrapper=_ColumnEntity)
    if not q._yield_per:
        q._yield_per = 10
    return iter(q)

def value(self, column):
    """Return a scalar result corresponding to the given column
    expression."""
    try:
        return next(self.values(column))[0]
    except StopIteration:
        return None

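# Usage sketch (illustrative): values() streams the selected columns as
# tuples; value() fetches a single scalar result.
for user_id, name in session.query(User).values(User.id, User.name):
    print(user_id, name)
user_count = session.query(User).value(func.count(User.id))
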
@_generative()
def with_entities(self, *entities):
    """Return a new :class:`.Query` replacing the SELECT list with the
    given entities.

    e.g.::

        # Users, filtered on some arbitrary criterion
        # and then ordered by related email address
        q = session.query(User).\\
            join(User.address).\\
            filter(User.name.like('%ed%')).\\
            order_by(Address.email)

        # given *only* User.id==5, Address.email, and 'q', what
        # would the *next* User in the result be ?
        subq = q.with_entities(Address.email).\\
            order_by(None).\\
            filter(User.id==5).\\
            subquery()
        q = q.join((subq, subq.c.email < Address.email)).\\
            limit(1)

    .. versionadded:: 0.6.5

    """
    self._set_entities(entities)

@_generative()
def add_columns(self, *column):
    """Add one or more column expressions to the list of result
    columns to be returned."""
    self._entities = list(self._entities)
    l = len(self._entities)
    for c in column:
        _ColumnEntity(self, c)
    # _ColumnEntity may append more than one entity per argument, so
    # select only the entities added above
    self._set_entity_selectables(self._entities[l:])

@util.pending_deprecation(
    '0.7',
    ':meth:`.add_column` is superseded by :meth:`.add_columns`',
    False)
def add_column(self, column):
    """Add a column expression to the list of result columns to be
    returned.

    Pending deprecation: :meth:`.add_column` will be superseded by
    :meth:`.add_columns`.

    """
    return self.add_columns(column)

def options(self, *args):
    """Return a new Query object, applying the given list of mapper
    options.

    Most supplied options regard changing how column- and
    relationship-mapped attributes are loaded.  See the sections
    :ref:`deferred` and :doc:`/orm/loading` for reference
    documentation.

    """
    return self._options(False, *args)

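# Usage sketch (illustrative): eagerly load the addresses collection
# and defer loading of the name column.
from sqlalchemy.orm import joinedload, defer
users = session.query(User).\
    options(joinedload(User.addresses), defer(User.name)).\
    all()
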
def with_transformation(self, fn):
    """Return a new :class:`.Query` object transformed by the given
    function.

    E.g.::

        def filter_something(criterion):
            def transform(q):
                return q.filter(criterion)
            return transform

        q = q.with_transformation(filter_something(x==5))

    This allows ad-hoc recipes to be created for :class:`.Query`
    objects.  See the example at :ref:`hybrid_transformers`.

    .. versionadded:: 0.7.4

    """
    return fn(self)

@_generative()
def with_hint(self, selectable, text, dialect_name='*'):
    """Add an indexing or other executional context hint for the given
    entity or selectable to this :class:`.Query`.

    Functionality is passed straight through to
    :meth:`~sqlalchemy.sql.expression.Select.with_hint`, with the
    addition that ``selectable`` can be a :class:`.Table`,
    :class:`.Alias`, or ORM entity / mapped class / etc.

    .. seealso::

        :meth:`.Query.with_statement_hint`

    """
    if selectable is not None:
        selectable = inspect(selectable).selectable
    self._with_hints += ((selectable, text, dialect_name),)

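# Usage sketch (illustrative): a MySQL-only table hint; both the hint
# text and the ix_users_name index name are assumptions made for this
# example.
q = session.query(User).\
    with_hint(User, 'USE INDEX (ix_users_name)', 'mysql')
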
def with_statement_hint(self, text, dialect_name='*'):
    """add a statement hint to this :class:`.Select`.

    This method is similar to :meth:`.Select.with_hint` except that it
    does not require an individual table, and instead applies to the
    statement as a whole.

    This feature calls down into :meth:`.Select.with_statement_hint`.

    .. versionadded:: 1.0.0

    .. seealso::

        :meth:`.Query.with_hint`

    """
    return self.with_hint(None, text, dialect_name)

@_generative()
def execution_options(self, **kwargs):
    """Set non-SQL options which take effect during execution.

    The options are the same as those accepted by
    :meth:`.Connection.execution_options`.

    Note that the ``stream_results`` execution option is enabled
    automatically if the
    :meth:`~sqlalchemy.orm.query.Query.yield_per()` method is used.

    """
    self._execution_options = self._execution_options.union(kwargs)

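# Usage sketch (illustrative): request streamed results explicitly,
# the same option that yield_per() sets automatically.
q = session.query(User).execution_options(stream_results=True)
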
@_generative()
def with_lockmode(self, mode):
    """Return a new :class:`.Query` object with the specified "locking
    mode", which essentially refers to the ``FOR UPDATE`` clause.

    .. deprecated:: 0.9.0 superseded by :meth:`.Query.with_for_update`.

    :param mode: a string representing the desired locking mode.
        Valid values are:

        * ``None`` - translates to no lockmode

        * ``'update'`` - translates to ``FOR UPDATE``
          (standard SQL, supported by most dialects)

        * ``'update_nowait'`` - translates to ``FOR UPDATE NOWAIT``
          (supported by Oracle, PostgreSQL 8.1 upwards)

        * ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for
          MySQL), and ``FOR SHARE`` (for PostgreSQL)

    .. seealso::

        :meth:`.Query.with_for_update` - improved API for specifying
        the ``FOR UPDATE`` clause.

    """
    self._for_update_arg = LockmodeArg.parse_legacy_query(mode)

@_generative()
def with_for_update(self, read=False, nowait=False, of=None):
    """return a new :class:`.Query` with the specified options for the
    ``FOR UPDATE`` clause.

    The behavior of this method is identical to that of
    :meth:`.SelectBase.with_for_update`.  When called with no
    arguments, the resulting ``SELECT`` statement will have a
    ``FOR UPDATE`` clause appended.  When additional arguments are
    specified, backend-specific options such as ``FOR UPDATE NOWAIT``
    or ``LOCK IN SHARE MODE`` can take effect.

    E.g.::

        q = sess.query(User).with_for_update(nowait=True, of=User)

    The above query on a Postgresql backend will render like::

        SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT

    .. versionadded:: 0.9.0 :meth:`.Query.with_for_update` supersedes
        the :meth:`.Query.with_lockmode` method.

    .. seealso::

        :meth:`.GenerativeSelect.with_for_update` - Core level method
        with full argument and behavioral description.

    """
    self._for_update_arg = LockmodeArg(read=read, nowait=nowait, of=of)

@_generative() def params(self, *args, **kwargs): 'add values for bind parameters which may have been\n specified in filter().\n\n parameters may be specified using \\**kwargs, or optionally a single\n dictionary as the first positional argument. The reason for both is\n that \\**kwargs is convenient, however some parameter dictionaries\n contain unicode keys in which case \\**kwargs cannot be used.\n\n ' if (len(args) == 1): kwargs.update(args[0]) elif (len(args) > 0): raise sa_exc.ArgumentError('params() takes zero or one positional argument, which is a dictionary.') self._params = self._params.copy() self._params.update(kwargs)
1,281,354,998,151,989,500
add values for bind parameters which may have been specified in filter(). parameters may be specified using \**kwargs, or optionally a single dictionary as the first positional argument. The reason for both is that \**kwargs is convenient, however some parameter dictionaries contain unicode keys in which case \**kwargs cannot be used.
lib/sqlalchemy/orm/query.py
params
slafs/sqlalchemy
python
@_generative() def params(self, *args, **kwargs): 'add values for bind parameters which may have been\n specified in filter().\n\n parameters may be specified using \\**kwargs, or optionally a single\n dictionary as the first positional argument. The reason for both is\n that \\**kwargs is convenient, however some parameter dictionaries\n contain unicode keys in which case \\**kwargs cannot be used.\n\n ' if (len(args) == 1): kwargs.update(args[0]) elif (len(args) > 0): raise sa_exc.ArgumentError('params() takes zero or one positional argument, which is a dictionary.') self._params = self._params.copy() self._params.update(kwargs)
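A sketch of both calling forms of ``params()``; the ``users`` table name inside the ``text()`` fragment is an assumption::

    from sqlalchemy import text

    # keyword form
    q = sess.query(User).filter(text("users.name = :name")).params(name='ed')

    # single-dictionary form, useful when the keys are not
    # valid Python identifiers
    q = sess.query(User).filter(text("users.name = :name")).params({'name': 'ed'})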
@_generative(_no_statement_condition, _no_limit_offset) def filter(self, *criterion): "apply the given filtering criterion to a copy\n of this :class:`.Query`, using SQL expressions.\n\n e.g.::\n\n session.query(MyClass).filter(MyClass.name == 'some name')\n\n Multiple criteria are joined together by AND::\n\n session.query(MyClass).\\\n filter(MyClass.name == 'some name', MyClass.id > 5)\n\n The criterion is any SQL expression object applicable to the\n WHERE clause of a select. String expressions are coerced\n into SQL expression constructs via the :func:`.text` construct.\n\n .. versionchanged:: 0.7.5\n Multiple criteria joined by AND.\n\n .. seealso::\n\n :meth:`.Query.filter_by` - filter on keyword expressions.\n\n " for criterion in list(criterion): criterion = expression._expression_literal_as_text(criterion) criterion = self._adapt_clause(criterion, True, True) if (self._criterion is not None): self._criterion = (self._criterion & criterion) else: self._criterion = criterion
4,063,492,974,036,546,000
apply the given filtering criterion to a copy of this :class:`.Query`, using SQL expressions. e.g.:: session.query(MyClass).filter(MyClass.name == 'some name') Multiple criteria are joined together by AND:: session.query(MyClass).\ filter(MyClass.name == 'some name', MyClass.id > 5) The criterion is any SQL expression object applicable to the WHERE clause of a select. String expressions are coerced into SQL expression constructs via the :func:`.text` construct. .. versionchanged:: 0.7.5 Multiple criteria joined by AND. .. seealso:: :meth:`.Query.filter_by` - filter on keyword expressions.
lib/sqlalchemy/orm/query.py
filter
slafs/sqlalchemy
python
@_generative(_no_statement_condition, _no_limit_offset) def filter(self, *criterion): "apply the given filtering criterion to a copy\n of this :class:`.Query`, using SQL expressions.\n\n e.g.::\n\n session.query(MyClass).filter(MyClass.name == 'some name')\n\n Multiple criteria are joined together by AND::\n\n session.query(MyClass).\\\n filter(MyClass.name == 'some name', MyClass.id > 5)\n\n The criterion is any SQL expression object applicable to the\n WHERE clause of a select. String expressions are coerced\n into SQL expression constructs via the :func:`.text` construct.\n\n .. versionchanged:: 0.7.5\n Multiple criteria joined by AND.\n\n .. seealso::\n\n :meth:`.Query.filter_by` - filter on keyword expressions.\n\n " for criterion in list(criterion): criterion = expression._expression_literal_as_text(criterion) criterion = self._adapt_clause(criterion, True, True) if (self._criterion is not None): self._criterion = (self._criterion & criterion) else: self._criterion = criterion
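A sketch of the AND-joining behavior noted above; OR grouping still requires an explicit conjunction::

    from sqlalchemy import or_

    # multiple positional criteria are ANDed together...
    q = sess.query(User).filter(User.name == 'ed', User.id > 5)

    # ...equivalent to chained calls
    q = sess.query(User).filter(User.name == 'ed').filter(User.id > 5)

    # OR needs or_()
    q = sess.query(User).filter(or_(User.name == 'ed', User.name == 'wendy'))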
def filter_by(self, **kwargs): "apply the given filtering criterion to a copy\n of this :class:`.Query`, using keyword expressions.\n\n e.g.::\n\n session.query(MyClass).filter_by(name = 'some name')\n\n Multiple criteria are joined together by AND::\n\n session.query(MyClass).\\\n filter_by(name = 'some name', id = 5)\n\n The keyword expressions are extracted from the primary\n entity of the query, or the last entity that was the\n target of a call to :meth:`.Query.join`.\n\n .. seealso::\n\n :meth:`.Query.filter` - filter on SQL expressions.\n\n " clauses = [(_entity_descriptor(self._joinpoint_zero(), key) == value) for (key, value) in kwargs.items()] return self.filter(sql.and_(*clauses))
-3,225,263,455,888,484,400
apply the given filtering criterion to a copy of this :class:`.Query`, using keyword expressions. e.g.:: session.query(MyClass).filter_by(name = 'some name') Multiple criteria are joined together by AND:: session.query(MyClass).\ filter_by(name = 'some name', id = 5) The keyword expressions are extracted from the primary entity of the query, or the last entity that was the target of a call to :meth:`.Query.join`. .. seealso:: :meth:`.Query.filter` - filter on SQL expressions.
lib/sqlalchemy/orm/query.py
filter_by
slafs/sqlalchemy
python
def filter_by(self, **kwargs): "apply the given filtering criterion to a copy\n of this :class:`.Query`, using keyword expressions.\n\n e.g.::\n\n session.query(MyClass).filter_by(name = 'some name')\n\n Multiple criteria are joined together by AND::\n\n session.query(MyClass).\\\n filter_by(name = 'some name', id = 5)\n\n The keyword expressions are extracted from the primary\n entity of the query, or the last entity that was the\n target of a call to :meth:`.Query.join`.\n\n .. seealso::\n\n :meth:`.Query.filter` - filter on SQL expressions.\n\n " clauses = [(_entity_descriptor(self._joinpoint_zero(), key) == value) for (key, value) in kwargs.items()] return self.filter(sql.and_(*clauses))
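A sketch contrasting ``filter_by()`` with ``filter()`` and showing the shifting keyword target; the ``User.addresses`` relationship and the ``Address.email_address`` column are assumptions::

    # equivalent for a single-entity query
    q1 = sess.query(User).filter_by(name='ed', id=5)
    q2 = sess.query(User).filter(User.name == 'ed', User.id == 5)

    # after a join, keywords resolve against the last join target
    q3 = sess.query(User).join(User.addresses).\
        filter_by(email_address='[email protected]')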
@_generative(_no_statement_condition, _no_limit_offset) def order_by(self, *criterion): 'apply one or more ORDER BY criterion to the query and return\n the newly resulting ``Query``\n\n All existing ORDER BY settings can be suppressed by\n passing ``None`` - this will suppress any ORDER BY configured\n on mappers as well.\n\n Alternatively, an existing ORDER BY setting on the Query\n object can be entirely cancelled by passing ``False``\n as the value - use this before calling methods where\n an ORDER BY is invalid.\n\n ' if (len(criterion) == 1): if (criterion[0] is False): if ('_order_by' in self.__dict__): del self._order_by return if (criterion[0] is None): self._order_by = None return criterion = self._adapt_col_list(criterion) if ((self._order_by is False) or (self._order_by is None)): self._order_by = criterion else: self._order_by = (self._order_by + criterion)
-7,147,566,917,387,758,000
apply one or more ORDER BY criterion to the query and return the newly resulting ``Query`` All existing ORDER BY settings can be suppressed by passing ``None`` - this will suppress any ORDER BY configured on mappers as well. Alternatively, an existing ORDER BY setting on the Query object can be entirely cancelled by passing ``False`` as the value - use this before calling methods where an ORDER BY is invalid.
lib/sqlalchemy/orm/query.py
order_by
slafs/sqlalchemy
python
@_generative(_no_statement_condition, _no_limit_offset) def order_by(self, *criterion): 'apply one or more ORDER BY criterion to the query and return\n the newly resulting ``Query``\n\n All existing ORDER BY settings can be suppressed by\n passing ``None`` - this will suppress any ORDER BY configured\n on mappers as well.\n\n Alternatively, an existing ORDER BY setting on the Query\n object can be entirely cancelled by passing ``False``\n as the value - use this before calling methods where\n an ORDER BY is invalid.\n\n ' if (len(criterion) == 1): if (criterion[0] is False): if ('_order_by' in self.__dict__): del self._order_by return if (criterion[0] is None): self._order_by = None return criterion = self._adapt_col_list(criterion) if ((self._order_by is False) or (self._order_by is None)): self._order_by = criterion else: self._order_by = (self._order_by + criterion)
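A sketch of the accumulation and cancellation behaviors described above::

    # criteria accumulate across calls
    q = sess.query(User).order_by(User.name).order_by(User.id.desc())

    # None suppresses all ORDER BY, including mapper-configured ones -
    # useful before wrapping the query in a union or subquery
    q = q.order_by(None)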
@_generative(_no_statement_condition, _no_limit_offset) def group_by(self, *criterion): 'apply one or more GROUP BY criterion to the query and return\n the newly resulting :class:`.Query`' criterion = list(chain(*[_orm_columns(c) for c in criterion])) criterion = self._adapt_col_list(criterion) if (self._group_by is False): self._group_by = criterion else: self._group_by = (self._group_by + criterion)
-4,990,816,376,501,235,000
apply one or more GROUP BY criterion to the query and return the newly resulting :class:`.Query`
lib/sqlalchemy/orm/query.py
group_by
slafs/sqlalchemy
python
@_generative(_no_statement_condition, _no_limit_offset) def group_by(self, *criterion): 'apply one or more GROUP BY criterion to the query and return\n the newly resulting :class:`.Query`' criterion = list(chain(*[_orm_columns(c) for c in criterion])) criterion = self._adapt_col_list(criterion) if (self._group_by is False): self._group_by = criterion else: self._group_by = (self._group_by + criterion)
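A sketch pairing ``group_by()`` with an aggregate, again assuming a ``User``/``Address`` mapping::

    from sqlalchemy import func

    # address count per user
    q = sess.query(User.id, func.count(Address.id)).\
        join(User.addresses).\
        group_by(User.id)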
@_generative(_no_statement_condition, _no_limit_offset) def having(self, criterion): 'apply a HAVING criterion to the query and return the\n newly resulting :class:`.Query`.\n\n :meth:`~.Query.having` is used in conjunction with\n :meth:`~.Query.group_by`.\n\n HAVING criterion makes it possible to use filters on aggregate\n functions like COUNT, SUM, AVG, MAX, and MIN, eg.::\n\n q = session.query(User.id).\\\n join(User.addresses).\\\n group_by(User.id).\\\n having(func.count(Address.id) > 2)\n\n ' criterion = expression._expression_literal_as_text(criterion) if ((criterion is not None) and (not isinstance(criterion, sql.ClauseElement))): raise sa_exc.ArgumentError('having() argument must be of type sqlalchemy.sql.ClauseElement or string') criterion = self._adapt_clause(criterion, True, True) if (self._having is not None): self._having = (self._having & criterion) else: self._having = criterion
8,944,880,713,640,449,000
apply a HAVING criterion to the query and return the newly resulting :class:`.Query`. :meth:`~.Query.having` is used in conjunction with :meth:`~.Query.group_by`. HAVING criterion makes it possible to use filters on aggregate functions like COUNT, SUM, AVG, MAX, and MIN, eg.:: q = session.query(User.id).\ join(User.addresses).\ group_by(User.id).\ having(func.count(Address.id) > 2)
lib/sqlalchemy/orm/query.py
having
slafs/sqlalchemy
python
@_generative(_no_statement_condition, _no_limit_offset) def having(self, criterion): 'apply a HAVING criterion to the query and return the\n newly resulting :class:`.Query`.\n\n :meth:`~.Query.having` is used in conjunction with\n :meth:`~.Query.group_by`.\n\n HAVING criterion makes it possible to use filters on aggregate\n functions like COUNT, SUM, AVG, MAX, and MIN, eg.::\n\n q = session.query(User.id).\\\n join(User.addresses).\\\n group_by(User.id).\\\n having(func.count(Address.id) > 2)\n\n ' criterion = expression._expression_literal_as_text(criterion) if ((criterion is not None) and (not isinstance(criterion, sql.ClauseElement))): raise sa_exc.ArgumentError('having() argument must be of type sqlalchemy.sql.ClauseElement or string') criterion = self._adapt_clause(criterion, True, True) if (self._having is not None): self._having = (self._having & criterion) else: self._having = criterion
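The docstring's example spelled out with its import; under the assumed mapping this selects users having more than two addresses::

    from sqlalchemy import func

    q = sess.query(User.id).\
        join(User.addresses).\
        group_by(User.id).\
        having(func.count(Address.id) > 2)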
def union(self, *q): "Produce a UNION of this Query against one or more queries.\n\n e.g.::\n\n q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')\n q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')\n\n q3 = q1.union(q2)\n\n The method accepts multiple Query objects so as to control\n the level of nesting. A series of ``union()`` calls such as::\n\n x.union(y).union(z).all()\n\n will nest on each ``union()``, and produces::\n\n SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION\n SELECT * FROM y) UNION SELECT * FROM Z)\n\n Whereas::\n\n x.union(y, z).all()\n\n produces::\n\n SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION\n SELECT * FROM Z)\n\n Note that many database backends do not allow ORDER BY to\n be rendered on a query called within UNION, EXCEPT, etc.\n To disable all ORDER BY clauses including those configured\n on mappers, issue ``query.order_by(None)`` - the resulting\n :class:`.Query` object will not render ORDER BY within\n its SELECT statement.\n\n " return self._from_selectable(expression.union(*([self] + list(q))))
-5,395,724,443,501,062,000
Produce a UNION of this Query against one or more queries. e.g.:: q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar') q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo') q3 = q1.union(q2) The method accepts multiple Query objects so as to control the level of nesting. A series of ``union()`` calls such as:: x.union(y).union(z).all() will nest on each ``union()``, and produces:: SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y) UNION SELECT * FROM Z) Whereas:: x.union(y, z).all() produces:: SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION SELECT * FROM Z) Note that many database backends do not allow ORDER BY to be rendered on a query called within UNION, EXCEPT, etc. To disable all ORDER BY clauses including those configured on mappers, issue ``query.order_by(None)`` - the resulting :class:`.Query` object will not render ORDER BY within its SELECT statement.
lib/sqlalchemy/orm/query.py
union
slafs/sqlalchemy
python
def union(self, *q): "Produce a UNION of this Query against one or more queries.\n\n e.g.::\n\n q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')\n q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')\n\n q3 = q1.union(q2)\n\n The method accepts multiple Query objects so as to control\n the level of nesting. A series of ``union()`` calls such as::\n\n x.union(y).union(z).all()\n\n will nest on each ``union()``, and produces::\n\n SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION\n SELECT * FROM y) UNION SELECT * FROM Z)\n\n Whereas::\n\n x.union(y, z).all()\n\n produces::\n\n SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION\n SELECT * FROM Z)\n\n Note that many database backends do not allow ORDER BY to\n be rendered on a query called within UNION, EXCEPT, etc.\n To disable all ORDER BY clauses including those configured\n on mappers, issue ``query.order_by(None)`` - the resulting\n :class:`.Query` object will not render ORDER BY within\n its SELECT statement.\n\n " return self._from_selectable(expression.union(*([self] + list(q))))
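A sketch of the nesting difference described above, plus the ORDER BY caveat::

    q1 = sess.query(User).filter(User.name == 'ed')
    q2 = sess.query(User).filter(User.name == 'wendy')
    q3 = sess.query(User).filter(User.name == 'jack')

    nested = q1.union(q2).union(q3)   # wraps each prior result
    flat = q1.union(q2, q3)           # one subquery, three SELECTs

    # suppress ORDER BY inside the component queries first
    flat = q1.order_by(None).union(q2, q3)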
def union_all(self, *q): 'Produce a UNION ALL of this Query against one or more queries.\n\n Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See\n that method for usage examples.\n\n ' return self._from_selectable(expression.union_all(*([self] + list(q))))
-7,173,164,358,490,195,000
Produce a UNION ALL of this Query against one or more queries. Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See that method for usage examples.
lib/sqlalchemy/orm/query.py
union_all
slafs/sqlalchemy
python
def union_all(self, *q): 'Produce a UNION ALL of this Query against one or more queries.\n\n Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See\n that method for usage examples.\n\n ' return self._from_selectable(expression.union_all(*([self] + list(q))))
def intersect(self, *q): 'Produce an INTERSECT of this Query against one or more queries.\n\n Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See\n that method for usage examples.\n\n ' return self._from_selectable(expression.intersect(*([self] + list(q))))
-458,251,160,218,900,860
Produce an INTERSECT of this Query against one or more queries. Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See that method for usage examples.
lib/sqlalchemy/orm/query.py
intersect
slafs/sqlalchemy
python
def intersect(self, *q): 'Produce an INTERSECT of this Query against one or more queries.\n\n Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See\n that method for usage examples.\n\n ' return self._from_selectable(expression.intersect(*([self] + list(q))))
def intersect_all(self, *q): 'Produce an INTERSECT ALL of this Query against one or more queries.\n\n Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See\n that method for usage examples.\n\n ' return self._from_selectable(expression.intersect_all(*([self] + list(q))))
1,162,686,790,410,963,200
Produce an INTERSECT ALL of this Query against one or more queries. Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See that method for usage examples.
lib/sqlalchemy/orm/query.py
intersect_all
slafs/sqlalchemy
python
def intersect_all(self, *q): 'Produce an INTERSECT ALL of this Query against one or more queries.\n\n Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See\n that method for usage examples.\n\n ' return self._from_selectable(expression.intersect_all(*([self] + list(q))))
def except_(self, *q): 'Produce an EXCEPT of this Query against one or more queries.\n\n Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See\n that method for usage examples.\n\n ' return self._from_selectable(expression.except_(*([self] + list(q))))
1,257,272,805,102,739,700
Produce an EXCEPT of this Query against one or more queries. Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See that method for usage examples.
lib/sqlalchemy/orm/query.py
except_
slafs/sqlalchemy
python
def except_(self, *q): 'Produce an EXCEPT of this Query against one or more queries.\n\n Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See\n that method for usage examples.\n\n ' return self._from_selectable(expression.except_(*([self] + list(q))))
def except_all(self, *q): 'Produce an EXCEPT ALL of this Query against one or more queries.\n\n Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See\n that method for usage examples.\n\n ' return self._from_selectable(expression.except_all(*([self] + list(q))))
-7,427,639,660,594,553,000
Produce an EXCEPT ALL of this Query against one or more queries. Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See that method for usage examples.
lib/sqlalchemy/orm/query.py
except_all
slafs/sqlalchemy
python
def except_all(self, *q): 'Produce an EXCEPT ALL of this Query against one or more queries.\n\n Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See\n that method for usage examples.\n\n ' return self._from_selectable(expression.except_all(*([self] + list(q))))
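The set operations above all share ``union()``'s calling convention; a sketch, with the ``User.active`` and ``User.banned`` columns as purely hypothetical::

    active = sess.query(User.id).filter(User.active == True)
    banned = sess.query(User.id).filter(User.banned == True)

    both = active.intersect(banned)       # INTERSECT
    remaining = active.except_(banned)    # EXCEPT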
def join(self, *props, **kwargs): 'Create a SQL JOIN against this :class:`.Query` object\'s criterion\n and apply generatively, returning the newly resulting :class:`.Query`.\n\n **Simple Relationship Joins**\n\n Consider a mapping between two classes ``User`` and ``Address``,\n with a relationship ``User.addresses`` representing a collection\n of ``Address`` objects associated with each ``User``. The most\n common usage of :meth:`~.Query.join` is to create a JOIN along this\n relationship, using the ``User.addresses`` attribute as an indicator\n for how this should occur::\n\n q = session.query(User).join(User.addresses)\n\n Where above, the call to :meth:`~.Query.join` along ``User.addresses``\n will result in SQL equivalent to::\n\n SELECT user.* FROM user JOIN address ON user.id = address.user_id\n\n In the above example we refer to ``User.addresses`` as passed to\n :meth:`~.Query.join` as the *on clause*, that is, it indicates\n how the "ON" portion of the JOIN should be constructed. For a\n single-entity query such as the one above (i.e. we start by selecting\n only from ``User`` and nothing else), the relationship can also be\n specified by its string name::\n\n q = session.query(User).join("addresses")\n\n :meth:`~.Query.join` can also accommodate multiple\n "on clause" arguments to produce a chain of joins, such as below\n where a join across four related entities is constructed::\n\n q = session.query(User).join("orders", "items", "keywords")\n\n The above would be shorthand for three separate calls to\n :meth:`~.Query.join`, each using an explicit attribute to indicate\n the source entity::\n\n q = session.query(User).\\\n join(User.orders).\\\n join(Order.items).\\\n join(Item.keywords)\n\n **Joins to a Target Entity or Selectable**\n\n A second form of :meth:`~.Query.join` allows any mapped entity\n or core selectable construct as a target. In this usage,\n :meth:`~.Query.join` will attempt\n to create a JOIN along the natural foreign key relationship between\n two entities::\n\n q = session.query(User).join(Address)\n\n The above calling form of :meth:`~.Query.join` will raise an error if\n either there are no foreign keys between the two entities, or if\n there are multiple foreign key linkages between them. In the\n above calling form, :meth:`~.Query.join` is called upon to\n create the "on clause" automatically for us. The target can\n be any mapped entity or selectable, such as a :class:`.Table`::\n\n q = session.query(User).join(addresses_table)\n\n **Joins to a Target with an ON Clause**\n\n The third calling form allows both the target entity as well\n as the ON clause to be passed explicitly. Suppose for\n example we wanted to join to ``Address`` twice, using\n an alias the second time. 
We use :func:`~sqlalchemy.orm.aliased`\n to create a distinct alias of ``Address``, and join\n to it using the ``target, onclause`` form, so that the\n alias can be specified explicitly as the target along with\n the relationship to instruct how the ON clause should proceed::\n\n a_alias = aliased(Address)\n\n q = session.query(User).\\\n join(User.addresses).\\\n join(a_alias, User.addresses).\\\n filter(Address.email_address==\'[email protected]\').\\\n filter(a_alias.email_address==\'[email protected]\')\n\n Where above, the generated SQL would be similar to::\n\n SELECT user.* FROM user\n JOIN address ON user.id = address.user_id\n JOIN address AS address_1 ON user.id=address_1.user_id\n WHERE address.email_address = :email_address_1\n AND address_1.email_address = :email_address_2\n\n The two-argument calling form of :meth:`~.Query.join`\n also allows us to construct arbitrary joins with SQL-oriented\n "on clause" expressions, not relying upon configured relationships\n at all. Any SQL expression can be passed as the ON clause\n when using the two-argument form, which should refer to the target\n entity in some way as well as an applicable source entity::\n\n q = session.query(User).join(Address, User.id==Address.user_id)\n\n .. versionchanged:: 0.7\n In SQLAlchemy 0.6 and earlier, the two argument form of\n :meth:`~.Query.join` requires the usage of a tuple:\n ``query(User).join((Address, User.id==Address.user_id))``\\ .\n This calling form is accepted in 0.7 and further, though\n is not necessary unless multiple join conditions are passed to\n a single :meth:`~.Query.join` call, which itself is also not\n generally necessary as it is now equivalent to multiple\n calls (this wasn\'t always the case).\n\n **Advanced Join Targeting and Adaption**\n\n There is a lot of flexibility in what the "target" can be when using\n :meth:`~.Query.join`. As noted previously, it also accepts\n :class:`.Table` constructs and other selectables such as\n :func:`.alias` and :func:`.select` constructs, with either the one\n or two-argument forms::\n\n addresses_q = select([Address.user_id]).\\\n where(Address.email_address.endswith("@bar.com")).\\\n alias()\n\n q = session.query(User).\\\n join(addresses_q, addresses_q.c.user_id==User.id)\n\n :meth:`~.Query.join` also features the ability to *adapt* a\n :meth:`~sqlalchemy.orm.relationship` -driven ON clause to the target\n selectable. 
Below we construct a JOIN from ``User`` to a subquery\n against ``Address``, allowing the relationship denoted by\n ``User.addresses`` to *adapt* itself to the altered target::\n\n address_subq = session.query(Address).\\\n filter(Address.email_address == \'[email protected]\').\\\n subquery()\n\n q = session.query(User).join(address_subq, User.addresses)\n\n Producing SQL similar to::\n\n SELECT user.* FROM user\n JOIN (\n SELECT address.id AS id,\n address.user_id AS user_id,\n address.email_address AS email_address\n FROM address\n WHERE address.email_address = :email_address_1\n ) AS anon_1 ON user.id = anon_1.user_id\n\n The above form allows one to fall back onto an explicit ON\n clause at any time::\n\n q = session.query(User).\\\n join(address_subq, User.id==address_subq.c.user_id)\n\n **Controlling what to Join From**\n\n While :meth:`~.Query.join` exclusively deals with the "right"\n side of the JOIN, we can also control the "left" side, in those\n cases where it\'s needed, using :meth:`~.Query.select_from`.\n Below we construct a query against ``Address`` but can still\n make usage of ``User.addresses`` as our ON clause by instructing\n the :class:`.Query` to select first from the ``User``\n entity::\n\n q = session.query(Address).select_from(User).\\\n join(User.addresses).\\\n filter(User.name == \'ed\')\n\n Which will produce SQL similar to::\n\n SELECT address.* FROM user\n JOIN address ON user.id=address.user_id\n WHERE user.name = :name_1\n\n **Constructing Aliases Anonymously**\n\n :meth:`~.Query.join` can construct anonymous aliases\n using the ``aliased=True`` flag. This feature is useful\n when a query is being joined algorithmically, such as\n when querying self-referentially to an arbitrary depth::\n\n q = session.query(Node).\\\n join("children", "children", aliased=True)\n\n When ``aliased=True`` is used, the actual "alias" construct\n is not explicitly available. To work with it, methods such as\n :meth:`.Query.filter` will adapt the incoming entity to\n the last join point::\n\n q = session.query(Node).\\\n join("children", "children", aliased=True).\\\n filter(Node.name == \'grandchild 1\')\n\n When using automatic aliasing, the ``from_joinpoint=True``\n argument can allow a multi-node join to be broken into\n multiple calls to :meth:`~.Query.join`, so that\n each path along the way can be further filtered::\n\n q = session.query(Node).\\\n join("children", aliased=True).\\\n filter(Node.name == \'child 1\').\\\n join("children", aliased=True, from_joinpoint=True).\\\n filter(Node.name == \'grandchild 1\')\n\n The filtering aliases above can then be reset back to the\n original ``Node`` entity using :meth:`~.Query.reset_joinpoint`::\n\n q = session.query(Node).\\\n join("children", "children", aliased=True).\\\n filter(Node.name == \'grandchild 1\').\\\n reset_joinpoint().\\\n filter(Node.name == \'parent 1\')\n\n For an example of ``aliased=True``, see the distribution\n example :ref:`examples_xmlpersistence` which illustrates\n an XPath-like query system using algorithmic joins.\n\n :param \\*props: A collection of one or more join conditions,\n each consisting of a relationship-bound attribute or string\n relationship name representing an "on clause", or a single\n target entity, or a tuple in the form of ``(target, onclause)``.\n A special two-argument calling form of the form ``target, onclause``\n is also accepted.\n :param aliased=False: If True, indicate that the JOIN target should be\n anonymously aliased. 
Subsequent calls to :meth:`~.Query.filter`\n and similar will adapt the incoming criterion to the target\n alias, until :meth:`~.Query.reset_joinpoint` is called.\n :param isouter=False: If True, the join used will be a left outer join,\n just as if the :meth:`.Query.outerjoin` method were called. This\n flag is here to maintain consistency with the same flag as accepted\n by :meth:`.FromClause.join` and other Core constructs.\n\n\n .. versionadded:: 1.0.0\n\n :param from_joinpoint=False: When using ``aliased=True``, a setting\n of True here will cause the join to be from the most recent\n joined target, rather than starting back from the original\n FROM clauses of the query.\n\n .. seealso::\n\n :ref:`ormtutorial_joins` in the ORM tutorial.\n\n :ref:`inheritance_toplevel` for details on how\n :meth:`~.Query.join` is used for inheritance relationships.\n\n :func:`.orm.join` - a standalone ORM-level join function,\n used internally by :meth:`.Query.join`, which in previous\n SQLAlchemy versions was the primary ORM-level joining interface.\n\n ' (aliased, from_joinpoint, isouter) = (kwargs.pop('aliased', False), kwargs.pop('from_joinpoint', False), kwargs.pop('isouter', False)) if kwargs: raise TypeError(('unknown arguments: %s' % ','.join(kwargs.keys()))) return self._join(props, outerjoin=isouter, create_aliases=aliased, from_joinpoint=from_joinpoint)
-5,546,919,944,220,456,000
Create a SQL JOIN against this :class:`.Query` object's criterion and apply generatively, returning the newly resulting :class:`.Query`. **Simple Relationship Joins** Consider a mapping between two classes ``User`` and ``Address``, with a relationship ``User.addresses`` representing a collection of ``Address`` objects associated with each ``User``. The most common usage of :meth:`~.Query.join` is to create a JOIN along this relationship, using the ``User.addresses`` attribute as an indicator for how this should occur:: q = session.query(User).join(User.addresses) Where above, the call to :meth:`~.Query.join` along ``User.addresses`` will result in SQL equivalent to:: SELECT user.* FROM user JOIN address ON user.id = address.user_id In the above example we refer to ``User.addresses`` as passed to :meth:`~.Query.join` as the *on clause*, that is, it indicates how the "ON" portion of the JOIN should be constructed. For a single-entity query such as the one above (i.e. we start by selecting only from ``User`` and nothing else), the relationship can also be specified by its string name:: q = session.query(User).join("addresses") :meth:`~.Query.join` can also accommodate multiple "on clause" arguments to produce a chain of joins, such as below where a join across four related entities is constructed:: q = session.query(User).join("orders", "items", "keywords") The above would be shorthand for three separate calls to :meth:`~.Query.join`, each using an explicit attribute to indicate the source entity:: q = session.query(User).\ join(User.orders).\ join(Order.items).\ join(Item.keywords) **Joins to a Target Entity or Selectable** A second form of :meth:`~.Query.join` allows any mapped entity or core selectable construct as a target. In this usage, :meth:`~.Query.join` will attempt to create a JOIN along the natural foreign key relationship between two entities:: q = session.query(User).join(Address) The above calling form of :meth:`~.Query.join` will raise an error if either there are no foreign keys between the two entities, or if there are multiple foreign key linkages between them. In the above calling form, :meth:`~.Query.join` is called upon to create the "on clause" automatically for us. The target can be any mapped entity or selectable, such as a :class:`.Table`:: q = session.query(User).join(addresses_table) **Joins to a Target with an ON Clause** The third calling form allows both the target entity as well as the ON clause to be passed explicitly. Suppose for example we wanted to join to ``Address`` twice, using an alias the second time. We use :func:`~sqlalchemy.orm.aliased` to create a distinct alias of ``Address``, and join to it using the ``target, onclause`` form, so that the alias can be specified explicitly as the target along with the relationship to instruct how the ON clause should proceed:: a_alias = aliased(Address) q = session.query(User).\ join(User.addresses).\ join(a_alias, User.addresses).\ filter(Address.email_address=='[email protected]').\ filter(a_alias.email_address=='[email protected]') Where above, the generated SQL would be similar to:: SELECT user.* FROM user JOIN address ON user.id = address.user_id JOIN address AS address_1 ON user.id=address_1.user_id WHERE address.email_address = :email_address_1 AND address_1.email_address = :email_address_2 The two-argument calling form of :meth:`~.Query.join` also allows us to construct arbitrary joins with SQL-oriented "on clause" expressions, not relying upon configured relationships at all. 
Any SQL expression can be passed as the ON clause when using the two-argument form, which should refer to the target entity in some way as well as an applicable source entity:: q = session.query(User).join(Address, User.id==Address.user_id) .. versionchanged:: 0.7 In SQLAlchemy 0.6 and earlier, the two argument form of :meth:`~.Query.join` requires the usage of a tuple: ``query(User).join((Address, User.id==Address.user_id))``\ . This calling form is accepted in 0.7 and further, though is not necessary unless multiple join conditions are passed to a single :meth:`~.Query.join` call, which itself is also not generally necessary as it is now equivalent to multiple calls (this wasn't always the case). **Advanced Join Targeting and Adaption** There is a lot of flexibility in what the "target" can be when using :meth:`~.Query.join`. As noted previously, it also accepts :class:`.Table` constructs and other selectables such as :func:`.alias` and :func:`.select` constructs, with either the one or two-argument forms:: addresses_q = select([Address.user_id]).\ where(Address.email_address.endswith("@bar.com")).\ alias() q = session.query(User).\ join(addresses_q, addresses_q.c.user_id==User.id) :meth:`~.Query.join` also features the ability to *adapt* a :meth:`~sqlalchemy.orm.relationship` -driven ON clause to the target selectable. Below we construct a JOIN from ``User`` to a subquery against ``Address``, allowing the relationship denoted by ``User.addresses`` to *adapt* itself to the altered target:: address_subq = session.query(Address).\ filter(Address.email_address == '[email protected]').\ subquery() q = session.query(User).join(address_subq, User.addresses) Producing SQL similar to:: SELECT user.* FROM user JOIN ( SELECT address.id AS id, address.user_id AS user_id, address.email_address AS email_address FROM address WHERE address.email_address = :email_address_1 ) AS anon_1 ON user.id = anon_1.user_id The above form allows one to fall back onto an explicit ON clause at any time:: q = session.query(User).\ join(address_subq, User.id==address_subq.c.user_id) **Controlling what to Join From** While :meth:`~.Query.join` exclusively deals with the "right" side of the JOIN, we can also control the "left" side, in those cases where it's needed, using :meth:`~.Query.select_from`. Below we construct a query against ``Address`` but can still make usage of ``User.addresses`` as our ON clause by instructing the :class:`.Query` to select first from the ``User`` entity:: q = session.query(Address).select_from(User).\ join(User.addresses).\ filter(User.name == 'ed') Which will produce SQL similar to:: SELECT address.* FROM user JOIN address ON user.id=address.user_id WHERE user.name = :name_1 **Constructing Aliases Anonymously** :meth:`~.Query.join` can construct anonymous aliases using the ``aliased=True`` flag. This feature is useful when a query is being joined algorithmically, such as when querying self-referentially to an arbitrary depth:: q = session.query(Node).\ join("children", "children", aliased=True) When ``aliased=True`` is used, the actual "alias" construct is not explicitly available. 
To work with it, methods such as :meth:`.Query.filter` will adapt the incoming entity to the last join point:: q = session.query(Node).\ join("children", "children", aliased=True).\ filter(Node.name == 'grandchild 1') When using automatic aliasing, the ``from_joinpoint=True`` argument can allow a multi-node join to be broken into multiple calls to :meth:`~.Query.join`, so that each path along the way can be further filtered:: q = session.query(Node).\ join("children", aliased=True).\ filter(Node.name == 'child 1').\ join("children", aliased=True, from_joinpoint=True).\ filter(Node.name == 'grandchild 1') The filtering aliases above can then be reset back to the original ``Node`` entity using :meth:`~.Query.reset_joinpoint`:: q = session.query(Node).\ join("children", "children", aliased=True).\ filter(Node.name == 'grandchild 1').\ reset_joinpoint().\ filter(Node.name == 'parent 1') For an example of ``aliased=True``, see the distribution example :ref:`examples_xmlpersistence` which illustrates an XPath-like query system using algorithmic joins. :param \*props: A collection of one or more join conditions, each consisting of a relationship-bound attribute or string relationship name representing an "on clause", or a single target entity, or a tuple in the form of ``(target, onclause)``. A special two-argument calling form of the form ``target, onclause`` is also accepted. :param aliased=False: If True, indicate that the JOIN target should be anonymously aliased. Subsequent calls to :meth:`~.Query.filter` and similar will adapt the incoming criterion to the target alias, until :meth:`~.Query.reset_joinpoint` is called. :param isouter=False: If True, the join used will be a left outer join, just as if the :meth:`.Query.outerjoin` method were called. This flag is here to maintain consistency with the same flag as accepted by :meth:`.FromClause.join` and other Core constructs. .. versionadded:: 1.0.0 :param from_joinpoint=False: When using ``aliased=True``, a setting of True here will cause the join to be from the most recent joined target, rather than starting back from the original FROM clauses of the query. .. seealso:: :ref:`ormtutorial_joins` in the ORM tutorial. :ref:`inheritance_toplevel` for details on how :meth:`~.Query.join` is used for inheritance relationships. :func:`.orm.join` - a standalone ORM-level join function, used internally by :meth:`.Query.join`, which in previous SQLAlchemy versions was the primary ORM-level joining interface.
lib/sqlalchemy/orm/query.py
join
slafs/sqlalchemy
python
def join(self, *props, **kwargs): 'Create a SQL JOIN against this :class:`.Query` object\'s criterion\n and apply generatively, returning the newly resulting :class:`.Query`.\n\n **Simple Relationship Joins**\n\n Consider a mapping between two classes ``User`` and ``Address``,\n with a relationship ``User.addresses`` representing a collection\n of ``Address`` objects associated with each ``User``. The most\n common usage of :meth:`~.Query.join` is to create a JOIN along this\n relationship, using the ``User.addresses`` attribute as an indicator\n for how this should occur::\n\n q = session.query(User).join(User.addresses)\n\n Where above, the call to :meth:`~.Query.join` along ``User.addresses``\n will result in SQL equivalent to::\n\n SELECT user.* FROM user JOIN address ON user.id = address.user_id\n\n In the above example we refer to ``User.addresses`` as passed to\n :meth:`~.Query.join` as the *on clause*, that is, it indicates\n how the "ON" portion of the JOIN should be constructed. For a\n single-entity query such as the one above (i.e. we start by selecting\n only from ``User`` and nothing else), the relationship can also be\n specified by its string name::\n\n q = session.query(User).join("addresses")\n\n :meth:`~.Query.join` can also accommodate multiple\n "on clause" arguments to produce a chain of joins, such as below\n where a join across four related entities is constructed::\n\n q = session.query(User).join("orders", "items", "keywords")\n\n The above would be shorthand for three separate calls to\n :meth:`~.Query.join`, each using an explicit attribute to indicate\n the source entity::\n\n q = session.query(User).\\\n join(User.orders).\\\n join(Order.items).\\\n join(Item.keywords)\n\n **Joins to a Target Entity or Selectable**\n\n A second form of :meth:`~.Query.join` allows any mapped entity\n or core selectable construct as a target. In this usage,\n :meth:`~.Query.join` will attempt\n to create a JOIN along the natural foreign key relationship between\n two entities::\n\n q = session.query(User).join(Address)\n\n The above calling form of :meth:`~.Query.join` will raise an error if\n either there are no foreign keys between the two entities, or if\n there are multiple foreign key linkages between them. In the\n above calling form, :meth:`~.Query.join` is called upon to\n create the "on clause" automatically for us. The target can\n be any mapped entity or selectable, such as a :class:`.Table`::\n\n q = session.query(User).join(addresses_table)\n\n **Joins to a Target with an ON Clause**\n\n The third calling form allows both the target entity as well\n as the ON clause to be passed explicitly. Suppose for\n example we wanted to join to ``Address`` twice, using\n an alias the second time. 
We use :func:`~sqlalchemy.orm.aliased`\n to create a distinct alias of ``Address``, and join\n to it using the ``target, onclause`` form, so that the\n alias can be specified explicitly as the target along with\n the relationship to instruct how the ON clause should proceed::\n\n a_alias = aliased(Address)\n\n q = session.query(User).\\\n join(User.addresses).\\\n join(a_alias, User.addresses).\\\n filter(Address.email_address==\'[email protected]\').\\\n filter(a_alias.email_address==\'[email protected]\')\n\n Where above, the generated SQL would be similar to::\n\n SELECT user.* FROM user\n JOIN address ON user.id = address.user_id\n JOIN address AS address_1 ON user.id=address_1.user_id\n WHERE address.email_address = :email_address_1\n AND address_1.email_address = :email_address_2\n\n The two-argument calling form of :meth:`~.Query.join`\n also allows us to construct arbitrary joins with SQL-oriented\n "on clause" expressions, not relying upon configured relationships\n at all. Any SQL expression can be passed as the ON clause\n when using the two-argument form, which should refer to the target\n entity in some way as well as an applicable source entity::\n\n q = session.query(User).join(Address, User.id==Address.user_id)\n\n .. versionchanged:: 0.7\n In SQLAlchemy 0.6 and earlier, the two argument form of\n :meth:`~.Query.join` requires the usage of a tuple:\n ``query(User).join((Address, User.id==Address.user_id))``\\ .\n This calling form is accepted in 0.7 and further, though\n is not necessary unless multiple join conditions are passed to\n a single :meth:`~.Query.join` call, which itself is also not\n generally necessary as it is now equivalent to multiple\n calls (this wasn\'t always the case).\n\n **Advanced Join Targeting and Adaption**\n\n There is a lot of flexibility in what the "target" can be when using\n :meth:`~.Query.join`. As noted previously, it also accepts\n :class:`.Table` constructs and other selectables such as\n :func:`.alias` and :func:`.select` constructs, with either the one\n or two-argument forms::\n\n addresses_q = select([Address.user_id]).\\\n where(Address.email_address.endswith("@bar.com")).\\\n alias()\n\n q = session.query(User).\\\n join(addresses_q, addresses_q.c.user_id==User.id)\n\n :meth:`~.Query.join` also features the ability to *adapt* a\n :meth:`~sqlalchemy.orm.relationship` -driven ON clause to the target\n selectable. 
Below we construct a JOIN from ``User`` to a subquery\n against ``Address``, allowing the relationship denoted by\n ``User.addresses`` to *adapt* itself to the altered target::\n\n address_subq = session.query(Address).\\\n filter(Address.email_address == \'[email protected]\').\\\n subquery()\n\n q = session.query(User).join(address_subq, User.addresses)\n\n Producing SQL similar to::\n\n SELECT user.* FROM user\n JOIN (\n SELECT address.id AS id,\n address.user_id AS user_id,\n address.email_address AS email_address\n FROM address\n WHERE address.email_address = :email_address_1\n ) AS anon_1 ON user.id = anon_1.user_id\n\n The above form allows one to fall back onto an explicit ON\n clause at any time::\n\n q = session.query(User).\\\n join(address_subq, User.id==address_subq.c.user_id)\n\n **Controlling what to Join From**\n\n While :meth:`~.Query.join` exclusively deals with the "right"\n side of the JOIN, we can also control the "left" side, in those\n cases where it\'s needed, using :meth:`~.Query.select_from`.\n Below we construct a query against ``Address`` but can still\n make usage of ``User.addresses`` as our ON clause by instructing\n the :class:`.Query` to select first from the ``User``\n entity::\n\n q = session.query(Address).select_from(User).\\\n join(User.addresses).\\\n filter(User.name == \'ed\')\n\n Which will produce SQL similar to::\n\n SELECT address.* FROM user\n JOIN address ON user.id=address.user_id\n WHERE user.name = :name_1\n\n **Constructing Aliases Anonymously**\n\n :meth:`~.Query.join` can construct anonymous aliases\n using the ``aliased=True`` flag. This feature is useful\n when a query is being joined algorithmically, such as\n when querying self-referentially to an arbitrary depth::\n\n q = session.query(Node).\\\n join("children", "children", aliased=True)\n\n When ``aliased=True`` is used, the actual "alias" construct\n is not explicitly available. To work with it, methods such as\n :meth:`.Query.filter` will adapt the incoming entity to\n the last join point::\n\n q = session.query(Node).\\\n join("children", "children", aliased=True).\\\n filter(Node.name == \'grandchild 1\')\n\n When using automatic aliasing, the ``from_joinpoint=True``\n argument can allow a multi-node join to be broken into\n multiple calls to :meth:`~.Query.join`, so that\n each path along the way can be further filtered::\n\n q = session.query(Node).\\\n join("children", aliased=True).\\\n filter(Node.name == \'child 1\').\\\n join("children", aliased=True, from_joinpoint=True).\\\n filter(Node.name == \'grandchild 1\')\n\n The filtering aliases above can then be reset back to the\n original ``Node`` entity using :meth:`~.Query.reset_joinpoint`::\n\n q = session.query(Node).\\\n join("children", "children", aliased=True).\\\n filter(Node.name == \'grandchild 1\').\\\n reset_joinpoint().\\\n filter(Node.name == \'parent 1\')\n\n For an example of ``aliased=True``, see the distribution\n example :ref:`examples_xmlpersistence` which illustrates\n an XPath-like query system using algorithmic joins.\n\n :param \\*props: A collection of one or more join conditions,\n each consisting of a relationship-bound attribute or string\n relationship name representing an "on clause", or a single\n target entity, or a tuple in the form of ``(target, onclause)``.\n A special two-argument calling form of the form ``target, onclause``\n is also accepted.\n :param aliased=False: If True, indicate that the JOIN target should be\n anonymously aliased. 
Subsequent calls to :meth:`~.Query.filter`\n and similar will adapt the incoming criterion to the target\n alias, until :meth:`~.Query.reset_joinpoint` is called.\n :param isouter=False: If True, the join used will be a left outer join,\n just as if the :meth:`.Query.outerjoin` method were called. This\n flag is here to maintain consistency with the same flag as accepted\n by :meth:`.FromClause.join` and other Core constructs.\n\n\n .. versionadded:: 1.0.0\n\n :param from_joinpoint=False: When using ``aliased=True``, a setting\n of True here will cause the join to be from the most recent\n joined target, rather than starting back from the original\n FROM clauses of the query.\n\n .. seealso::\n\n :ref:`ormtutorial_joins` in the ORM tutorial.\n\n :ref:`inheritance_toplevel` for details on how\n :meth:`~.Query.join` is used for inheritance relationships.\n\n :func:`.orm.join` - a standalone ORM-level join function,\n used internally by :meth:`.Query.join`, which in previous\n SQLAlchemy versions was the primary ORM-level joining interface.\n\n ' (aliased, from_joinpoint, isouter) = (kwargs.pop('aliased', False), kwargs.pop('from_joinpoint', False), kwargs.pop('isouter', False)) if kwargs: raise TypeError(('unknown arguments: %s' % ','.join(kwargs.keys()))) return self._join(props, outerjoin=isouter, create_aliases=aliased, from_joinpoint=from_joinpoint)
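A compact sketch of two further calling forms from the docstring above, assuming the ``User``/``Address`` mapping::

    # two-argument form with an explicit ON clause
    q = sess.query(User).join(Address, User.id == Address.user_id)

    # isouter=True renders a LEFT OUTER JOIN, like Query.outerjoin()
    q = sess.query(User).join(User.addresses, isouter=True)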
def outerjoin(self, *props, **kwargs): "Create a left outer join against this ``Query`` object's criterion\n and apply generatively, returning the newly resulting ``Query``.\n\n Usage is the same as the ``join()`` method.\n\n " (aliased, from_joinpoint) = (kwargs.pop('aliased', False), kwargs.pop('from_joinpoint', False)) if kwargs: raise TypeError(('unknown arguments: %s' % ','.join(kwargs))) return self._join(props, outerjoin=True, create_aliases=aliased, from_joinpoint=from_joinpoint)
-7,954,997,624,322,960,000
Create a left outer join against this ``Query`` object's criterion and apply generatively, returning the newly resulting ``Query``. Usage is the same as the ``join()`` method.
lib/sqlalchemy/orm/query.py
outerjoin
slafs/sqlalchemy
python
def outerjoin(self, *props, **kwargs): "Create a left outer join against this ``Query`` object's criterion\n and apply generatively, returning the newly resulting ``Query``.\n\n Usage is the same as the ``join()`` method.\n\n " (aliased, from_joinpoint) = (kwargs.pop('aliased', False), kwargs.pop('from_joinpoint', False)) if kwargs: raise TypeError(('unknown arguments: %s' % ','.join(kwargs))) return self._join(props, outerjoin=True, create_aliases=aliased, from_joinpoint=from_joinpoint)
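A sketch; users without addresses still appear in the result, with NULLs on the address side::

    q = sess.query(User, Address).outerjoin(User.addresses)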
@_generative(_no_statement_condition, _no_limit_offset) def _join(self, keys, outerjoin, create_aliases, from_joinpoint): 'consumes arguments from join() or outerjoin(), places them into a\n consistent format with which to form the actual JOIN constructs.\n\n ' if (not from_joinpoint): self._reset_joinpoint() if ((len(keys) == 2) and isinstance(keys[0], (expression.FromClause, type, AliasedClass)) and isinstance(keys[1], (str, expression.ClauseElement, interfaces.PropComparator))): keys = (keys,) for arg1 in util.to_list(keys): if isinstance(arg1, tuple): (arg1, arg2) = arg1 else: arg2 = None if isinstance(arg1, (interfaces.PropComparator, util.string_types)): (right_entity, onclause) = (arg2, arg1) else: (right_entity, onclause) = (arg1, arg2) left_entity = prop = None if isinstance(onclause, interfaces.PropComparator): of_type = getattr(onclause, '_of_type', None) else: of_type = None if isinstance(onclause, util.string_types): left_entity = self._joinpoint_zero() descriptor = _entity_descriptor(left_entity, onclause) onclause = descriptor elif (from_joinpoint and isinstance(onclause, interfaces.PropComparator)): left_entity = onclause._parententity info = inspect(self._joinpoint_zero()) (left_mapper, left_selectable, left_is_aliased) = (getattr(info, 'mapper', None), info.selectable, getattr(info, 'is_aliased_class', None)) if (left_mapper is left_entity): left_entity = self._joinpoint_zero() descriptor = _entity_descriptor(left_entity, onclause.key) onclause = descriptor if isinstance(onclause, interfaces.PropComparator): if (right_entity is None): if of_type: right_entity = of_type else: right_entity = onclause.property.mapper left_entity = onclause._parententity prop = onclause.property if (not isinstance(onclause, attributes.QueryableAttribute)): onclause = prop if (not create_aliases): edge = (left_entity, right_entity, prop.key) if (edge in self._joinpoint): jp = self._joinpoint[edge].copy() jp['prev'] = (edge, self._joinpoint) self._update_joinpoint(jp) continue elif ((onclause is not None) and (right_entity is None)): raise NotImplementedError('query.join(a==b) not supported.') self._join_left_to_right(left_entity, right_entity, onclause, outerjoin, create_aliases, prop)
6,434,774,937,667,570,000
consumes arguments from join() or outerjoin(), places them into a consistent format with which to form the actual JOIN constructs.
lib/sqlalchemy/orm/query.py
_join
slafs/sqlalchemy
python
@_generative(_no_statement_condition, _no_limit_offset) def _join(self, keys, outerjoin, create_aliases, from_joinpoint): 'consumes arguments from join() or outerjoin(), places them into a\n consistent format with which to form the actual JOIN constructs.\n\n ' if (not from_joinpoint): self._reset_joinpoint() if ((len(keys) == 2) and isinstance(keys[0], (expression.FromClause, type, AliasedClass)) and isinstance(keys[1], (str, expression.ClauseElement, interfaces.PropComparator))): keys = (keys,) for arg1 in util.to_list(keys): if isinstance(arg1, tuple): (arg1, arg2) = arg1 else: arg2 = None if isinstance(arg1, (interfaces.PropComparator, util.string_types)): (right_entity, onclause) = (arg2, arg1) else: (right_entity, onclause) = (arg1, arg2) left_entity = prop = None if isinstance(onclause, interfaces.PropComparator): of_type = getattr(onclause, '_of_type', None) else: of_type = None if isinstance(onclause, util.string_types): left_entity = self._joinpoint_zero() descriptor = _entity_descriptor(left_entity, onclause) onclause = descriptor elif (from_joinpoint and isinstance(onclause, interfaces.PropComparator)): left_entity = onclause._parententity info = inspect(self._joinpoint_zero()) (left_mapper, left_selectable, left_is_aliased) = (getattr(info, 'mapper', None), info.selectable, getattr(info, 'is_aliased_class', None)) if (left_mapper is left_entity): left_entity = self._joinpoint_zero() descriptor = _entity_descriptor(left_entity, onclause.key) onclause = descriptor if isinstance(onclause, interfaces.PropComparator): if (right_entity is None): if of_type: right_entity = of_type else: right_entity = onclause.property.mapper left_entity = onclause._parententity prop = onclause.property if (not isinstance(onclause, attributes.QueryableAttribute)): onclause = prop if (not create_aliases): edge = (left_entity, right_entity, prop.key) if (edge in self._joinpoint): jp = self._joinpoint[edge].copy() jp['prev'] = (edge, self._joinpoint) self._update_joinpoint(jp) continue elif ((onclause is not None) and (right_entity is None)): raise NotImplementedError('query.join(a==b) not supported.') self._join_left_to_right(left_entity, right_entity, onclause, outerjoin, create_aliases, prop)
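An illustration of the normalization ``_join()`` performs: both public calls below reach it as the same ``(target, onclause)`` sequence (``_join()`` is private and is never called directly)::

    sess.query(User).join(Address, User.id == Address.user_id)
    sess.query(User).join((Address, User.id == Address.user_id))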
def _join_left_to_right(self, left, right, onclause, outerjoin, create_aliases, prop): "append a JOIN to the query's from clause." self._polymorphic_adapters = self._polymorphic_adapters.copy() if (left is None): if self._from_obj: left = self._from_obj[0] elif self._entities: left = self._entities[0].entity_zero_or_selectable if (left is None): raise sa_exc.InvalidRequestError(("Don't know how to join from %s; please use select_from() to establish the left entity/selectable of this join" % self._entities[0])) if ((left is right) and (not create_aliases)): raise sa_exc.InvalidRequestError(("Can't construct a join from %s to %s, they are the same entity" % (left, right))) l_info = inspect(left) r_info = inspect(right) overlap = False if (not create_aliases): right_mapper = getattr(r_info, 'mapper', None) if (right_mapper and (right_mapper.with_polymorphic or isinstance(right_mapper.mapped_table, expression.Join))): for from_obj in (self._from_obj or [l_info.selectable]): if (sql_util.selectables_overlap(l_info.selectable, from_obj) and sql_util.selectables_overlap(from_obj, r_info.selectable)): overlap = True break if ((overlap or (not create_aliases)) and (l_info.selectable is r_info.selectable)): raise sa_exc.InvalidRequestError(("Can't join table/selectable '%s' to itself" % l_info.selectable)) (right, onclause) = self._prepare_right_side(r_info, right, onclause, create_aliases, prop, overlap) if ((not create_aliases) and prop): self._update_joinpoint({'_joinpoint_entity': right, 'prev': ((left, right, prop.key), self._joinpoint)}) else: self._joinpoint = {'_joinpoint_entity': right} self._join_to_left(l_info, left, right, onclause, outerjoin)
4,310,489,427,954,364,400
append a JOIN to the query's from clause.
lib/sqlalchemy/orm/query.py
_join_left_to_right
slafs/sqlalchemy
python
def _join_left_to_right(self, left, right, onclause, outerjoin, create_aliases, prop): self._polymorphic_adapters = self._polymorphic_adapters.copy() if (left is None): if self._from_obj: left = self._from_obj[0] elif self._entities: left = self._entities[0].entity_zero_or_selectable if (left is None): raise sa_exc.InvalidRequestError(("Don't know how to join from %s; please use select_from() to establish the left entity/selectable of this join" % self._entities[0])) if ((left is right) and (not create_aliases)): raise sa_exc.InvalidRequestError(("Can't construct a join from %s to %s, they are the same entity" % (left, right))) l_info = inspect(left) r_info = inspect(right) overlap = False if (not create_aliases): right_mapper = getattr(r_info, 'mapper', None) if (right_mapper and (right_mapper.with_polymorphic or isinstance(right_mapper.mapped_table, expression.Join))): for from_obj in (self._from_obj or [l_info.selectable]): if (sql_util.selectables_overlap(l_info.selectable, from_obj) and sql_util.selectables_overlap(from_obj, r_info.selectable)): overlap = True break if ((overlap or (not create_aliases)) and (l_info.selectable is r_info.selectable)): raise sa_exc.InvalidRequestError(("Can't join table/selectable '%s' to itself" % l_info.selectable)) (right, onclause) = self._prepare_right_side(r_info, right, onclause, create_aliases, prop, overlap) if ((not create_aliases) and prop): self._update_joinpoint({'_joinpoint_entity': right, 'prev': ((left, right, prop.key), self._joinpoint)}) else: self._joinpoint = {'_joinpoint_entity': right} self._join_to_left(l_info, left, right, onclause, outerjoin)
@_generative(_no_statement_condition) def reset_joinpoint(self): 'Return a new :class:`.Query`, where the "join point" has\n been reset back to the base FROM entities of the query.\n\n This method is usually used in conjunction with the\n ``aliased=True`` feature of the :meth:`~.Query.join`\n method. See the example in :meth:`~.Query.join` for how\n this is used.\n\n ' self._reset_joinpoint()
-4,546,611,727,478,187,500
Return a new :class:`.Query`, where the "join point" has been reset back to the base FROM entities of the query. This method is usually used in conjunction with the ``aliased=True`` feature of the :meth:`~.Query.join` method. See the example in :meth:`~.Query.join` for how this is used.
lib/sqlalchemy/orm/query.py
reset_joinpoint
slafs/sqlalchemy
python
@_generative(_no_statement_condition) def reset_joinpoint(self): 'Return a new :class:`.Query`, where the "join point" has\n been reset back to the base FROM entities of the query.\n\n This method is usually used in conjunction with the\n ``aliased=True`` feature of the :meth:`~.Query.join`\n method. See the example in :meth:`~.Query.join` for how\n this is used.\n\n ' self._reset_joinpoint()
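Continuing with the hypothetical ``User``/``Address`` mapping from the sketch above, a short illustration of the join point that :meth:`~.Query.reset_joinpoint` clears: after an ``aliased=True`` join, filter criteria are adapted against the anonymous alias until the join point is reset::

    q = session.query(User).\
        join(User.addresses, aliased=True).\
        filter(Address.email == 'ed@example.com').\
        reset_joinpoint().\
        filter(User.name == 'ed')   # compared against the plain "user" table again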
@_generative(_no_clauseelement_condition) def select_from(self, *from_obj): 'Set the FROM clause of this :class:`.Query` explicitly.\n\n :meth:`.Query.select_from` is often used in conjunction with\n :meth:`.Query.join` in order to control which entity is selected\n from on the "left" side of the join.\n\n The entity or selectable object here effectively replaces the\n "left edge" of any calls to :meth:`~.Query.join`, when no\n joinpoint is otherwise established - usually, the default "join\n point" is the leftmost entity in the :class:`~.Query` object\'s\n list of entities to be selected.\n\n A typical example::\n\n q = session.query(Address).select_from(User).\\\n join(User.addresses).\\\n filter(User.name == \'ed\')\n\n Which produces SQL equivalent to::\n\n SELECT address.* FROM user\n JOIN address ON user.id=address.user_id\n WHERE user.name = :name_1\n\n :param \\*from_obj: collection of one or more entities to apply\n to the FROM clause. Entities can be mapped classes,\n :class:`.AliasedClass` objects, :class:`.Mapper` objects\n as well as core :class:`.FromClause` elements like subqueries.\n\n .. versionchanged:: 0.9\n This method no longer applies the given FROM object\n to be the selectable from which matching entities\n select from; the :meth:`.select_entity_from` method\n now accomplishes this. See that method for a description\n of this behavior.\n\n .. seealso::\n\n :meth:`~.Query.join`\n\n :meth:`.Query.select_entity_from`\n\n ' self._set_select_from(from_obj, False)
6,294,455,819,122,400,000
Set the FROM clause of this :class:`.Query` explicitly. :meth:`.Query.select_from` is often used in conjunction with :meth:`.Query.join` in order to control which entity is selected from on the "left" side of the join. The entity or selectable object here effectively replaces the "left edge" of any calls to :meth:`~.Query.join`, when no joinpoint is otherwise established - usually, the default "join point" is the leftmost entity in the :class:`~.Query` object's list of entities to be selected. A typical example:: q = session.query(Address).select_from(User).\ join(User.addresses).\ filter(User.name == 'ed') Which produces SQL equivalent to:: SELECT address.* FROM user JOIN address ON user.id=address.user_id WHERE user.name = :name_1 :param \*from_obj: collection of one or more entities to apply to the FROM clause. Entities can be mapped classes, :class:`.AliasedClass` objects, :class:`.Mapper` objects as well as core :class:`.FromClause` elements like subqueries. .. versionchanged:: 0.9 This method no longer applies the given FROM object to be the selectable from which matching entities select from; the :meth:`.select_entity_from` method now accomplishes this. See that method for a description of this behavior. .. seealso:: :meth:`~.Query.join` :meth:`.Query.select_entity_from`
lib/sqlalchemy/orm/query.py
select_from
slafs/sqlalchemy
python
@_generative(_no_clauseelement_condition) def select_from(self, *from_obj): 'Set the FROM clause of this :class:`.Query` explicitly.\n\n :meth:`.Query.select_from` is often used in conjunction with\n :meth:`.Query.join` in order to control which entity is selected\n from on the "left" side of the join.\n\n The entity or selectable object here effectively replaces the\n "left edge" of any calls to :meth:`~.Query.join`, when no\n joinpoint is otherwise established - usually, the default "join\n point" is the leftmost entity in the :class:`~.Query` object\'s\n list of entities to be selected.\n\n A typical example::\n\n q = session.query(Address).select_from(User).\\\n join(User.addresses).\\\n filter(User.name == \'ed\')\n\n Which produces SQL equivalent to::\n\n SELECT address.* FROM user\n JOIN address ON user.id=address.user_id\n WHERE user.name = :name_1\n\n :param \\*from_obj: collection of one or more entities to apply\n to the FROM clause. Entities can be mapped classes,\n :class:`.AliasedClass` objects, :class:`.Mapper` objects\n as well as core :class:`.FromClause` elements like subqueries.\n\n .. versionchanged:: 0.9\n This method no longer applies the given FROM object\n to be the selectable from which matching entities\n select from; the :meth:`.select_entity_from` method\n now accomplishes this. See that method for a description\n of this behavior.\n\n .. seealso::\n\n :meth:`~.Query.join`\n\n :meth:`.Query.select_entity_from`\n\n ' self._set_select_from(from_obj, False)
@_generative(_no_clauseelement_condition) def select_entity_from(self, from_obj): 'Set the FROM clause of this :class:`.Query` to a\n core selectable, applying it as a replacement FROM clause\n for corresponding mapped entities.\n\n This method is similar to the :meth:`.Query.select_from`\n method, in that it sets the FROM clause of the query. However,\n where :meth:`.Query.select_from` only affects what is placed\n in the FROM, this method also applies the given selectable\n to replace the FROM which the selected entities would normally\n select from.\n\n The given ``from_obj`` must be an instance of a :class:`.FromClause`,\n e.g. a :func:`.select` or :class:`.Alias` construct.\n\n An example would be a :class:`.Query` that selects ``User`` entities,\n but uses :meth:`.Query.select_entity_from` to have the entities\n selected from a :func:`.select` construct instead of the\n base ``user`` table::\n\n select_stmt = select([User]).where(User.id == 7)\n\n q = session.query(User).\\\n select_entity_from(select_stmt).\\\n filter(User.name == \'ed\')\n\n The query generated will select ``User`` entities directly\n from the given :func:`.select` construct, and will be::\n\n SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name\n FROM (SELECT "user".id AS id, "user".name AS name\n FROM "user"\n WHERE "user".id = :id_1) AS anon_1\n WHERE anon_1.name = :name_1\n\n Notice above that even the WHERE criterion was "adapted" such that\n the ``anon_1`` subquery effectively replaces all references to the\n ``user`` table, except for the one that it refers to internally.\n\n Compare this to :meth:`.Query.select_from`, which as of\n version 0.9, does not affect existing entities. The\n statement below::\n\n q = session.query(User).\\\n select_from(select_stmt).\\\n filter(User.name == \'ed\')\n\n Produces SQL where both the ``user`` table as well as the\n ``select_stmt`` construct are present as separate elements\n in the FROM clause. No "adaptation" of the ``user`` table\n is applied::\n\n SELECT "user".id AS user_id, "user".name AS user_name\n FROM "user", (SELECT "user".id AS id, "user".name AS name\n FROM "user"\n WHERE "user".id = :id_1) AS anon_1\n WHERE "user".name = :name_1\n\n :meth:`.Query.select_entity_from` maintains an older\n behavior of :meth:`.Query.select_from`. In modern usage,\n similar results can also be achieved using :func:`.aliased`::\n\n select_stmt = select([User]).where(User.id == 7)\n user_from_select = aliased(User, select_stmt.alias())\n\n q = session.query(user_from_select)\n\n :param from_obj: a :class:`.FromClause` object that will replace\n the FROM clause of this :class:`.Query`.\n\n .. seealso::\n\n :meth:`.Query.select_from`\n\n .. versionadded:: 0.8\n :meth:`.Query.select_entity_from` was added to specify\n the specific behavior of entity replacement, however\n the :meth:`.Query.select_from` maintains this behavior\n as well until 0.9.\n\n ' self._set_select_from([from_obj], True)
8,139,648,133,792,127,000
Set the FROM clause of this :class:`.Query` to a core selectable, applying it as a replacement FROM clause for corresponding mapped entities. This method is similar to the :meth:`.Query.select_from` method, in that it sets the FROM clause of the query. However, where :meth:`.Query.select_from` only affects what is placed in the FROM, this method also applies the given selectable to replace the FROM which the selected entities would normally select from. The given ``from_obj`` must be an instance of a :class:`.FromClause`, e.g. a :func:`.select` or :class:`.Alias` construct. An example would be a :class:`.Query` that selects ``User`` entities, but uses :meth:`.Query.select_entity_from` to have the entities selected from a :func:`.select` construct instead of the base ``user`` table:: select_stmt = select([User]).where(User.id == 7) q = session.query(User).\ select_entity_from(select_stmt).\ filter(User.name == 'ed') The query generated will select ``User`` entities directly from the given :func:`.select` construct, and will be:: SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name FROM (SELECT "user".id AS id, "user".name AS name FROM "user" WHERE "user".id = :id_1) AS anon_1 WHERE anon_1.name = :name_1 Notice above that even the WHERE criterion was "adapted" such that the ``anon_1`` subquery effectively replaces all references to the ``user`` table, except for the one that it refers to internally. Compare this to :meth:`.Query.select_from`, which as of version 0.9, does not affect existing entities. The statement below:: q = session.query(User).\ select_from(select_stmt).\ filter(User.name == 'ed') Produces SQL where both the ``user`` table as well as the ``select_stmt`` construct are present as separate elements in the FROM clause. No "adaptation" of the ``user`` table is applied:: SELECT "user".id AS user_id, "user".name AS user_name FROM "user", (SELECT "user".id AS id, "user".name AS name FROM "user" WHERE "user".id = :id_1) AS anon_1 WHERE "user".name = :name_1 :meth:`.Query.select_entity_from` maintains an older behavior of :meth:`.Query.select_from`. In modern usage, similar results can also be achieved using :func:`.aliased`:: select_stmt = select([User]).where(User.id == 7) user_from_select = aliased(User, select_stmt.alias()) q = session.query(user_from_select) :param from_obj: a :class:`.FromClause` object that will replace the FROM clause of this :class:`.Query`. .. seealso:: :meth:`.Query.select_from` .. versionadded:: 0.8 :meth:`.Query.select_entity_from` was added to specify the specific behavior of entity replacement, however the :meth:`.Query.select_from` maintains this behavior as well until 0.9.
lib/sqlalchemy/orm/query.py
select_entity_from
slafs/sqlalchemy
python
@_generative(_no_clauseelement_condition) def select_entity_from(self, from_obj): 'Set the FROM clause of this :class:`.Query` to a\n core selectable, applying it as a replacement FROM clause\n for corresponding mapped entities.\n\n This method is similar to the :meth:`.Query.select_from`\n method, in that it sets the FROM clause of the query. However,\n where :meth:`.Query.select_from` only affects what is placed\n in the FROM, this method also applies the given selectable\n to replace the FROM which the selected entities would normally\n select from.\n\n The given ``from_obj`` must be an instance of a :class:`.FromClause`,\n e.g. a :func:`.select` or :class:`.Alias` construct.\n\n An example would be a :class:`.Query` that selects ``User`` entities,\n but uses :meth:`.Query.select_entity_from` to have the entities\n selected from a :func:`.select` construct instead of the\n base ``user`` table::\n\n select_stmt = select([User]).where(User.id == 7)\n\n q = session.query(User).\\\n select_entity_from(select_stmt).\\\n filter(User.name == \'ed\')\n\n The query generated will select ``User`` entities directly\n from the given :func:`.select` construct, and will be::\n\n SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name\n FROM (SELECT "user".id AS id, "user".name AS name\n FROM "user"\n WHERE "user".id = :id_1) AS anon_1\n WHERE anon_1.name = :name_1\n\n Notice above that even the WHERE criterion was "adapted" such that\n the ``anon_1`` subquery effectively replaces all references to the\n ``user`` table, except for the one that it refers to internally.\n\n Compare this to :meth:`.Query.select_from`, which as of\n version 0.9, does not affect existing entities. The\n statement below::\n\n q = session.query(User).\\\n select_from(select_stmt).\\\n filter(User.name == \'ed\')\n\n Produces SQL where both the ``user`` table as well as the\n ``select_stmt`` construct are present as separate elements\n in the FROM clause. No "adaptation" of the ``user`` table\n is applied::\n\n SELECT "user".id AS user_id, "user".name AS user_name\n FROM "user", (SELECT "user".id AS id, "user".name AS name\n FROM "user"\n WHERE "user".id = :id_1) AS anon_1\n WHERE "user".name = :name_1\n\n :meth:`.Query.select_entity_from` maintains an older\n behavior of :meth:`.Query.select_from`. In modern usage,\n similar results can also be achieved using :func:`.aliased`::\n\n select_stmt = select([User]).where(User.id == 7)\n user_from_select = aliased(User, select_stmt.alias())\n\n q = session.query(user_from_select)\n\n :param from_obj: a :class:`.FromClause` object that will replace\n the FROM clause of this :class:`.Query`.\n\n .. seealso::\n\n :meth:`.Query.select_from`\n\n .. versionadded:: 0.8\n :meth:`.Query.select_entity_from` was added to specify\n the specific behavior of entity replacement, however\n the :meth:`.Query.select_from` maintains this behavior\n as well until 0.9.\n\n ' self._set_select_from([from_obj], True)
@_generative(_no_statement_condition) def slice(self, start, stop): 'Apply LIMIT/OFFSET to the ``Query`` based on a\n range and return the newly resulting ``Query``.' if ((start is not None) and (stop is not None)): self._offset = ((self._offset or 0) + start) self._limit = (stop - start) elif ((start is None) and (stop is not None)): self._limit = stop elif ((start is not None) and (stop is None)): self._offset = ((self._offset or 0) + start) if (self._offset == 0): self._offset = None
4,062,953,535,849,662
Apply LIMIT/OFFSET to the ``Query`` based on a range and return the newly resulting ``Query``.
lib/sqlalchemy/orm/query.py
slice
slafs/sqlalchemy
python
@_generative(_no_statement_condition) def slice(self, start, stop): 'Apply LIMIT/OFFSET to the ``Query`` based on a\n range and return the newly resulting ``Query``.' if ((start is not None) and (stop is not None)): self._offset = ((self._offset or 0) + start) self._limit = (stop - start) elif ((start is None) and (stop is not None)): self._limit = stop elif ((start is not None) and (stop is None)): self._offset = ((self._offset or 0) + start) if (self._offset == 0): self._offset = None
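The start/stop arithmetic above is clearest with concrete numbers; a small sketch, again reusing the assumed ``User`` mapping::

    # slice(20, 30) sets OFFSET 20 and LIMIT (30 - 20) = 10,
    # equivalent to query[20:30]
    page = session.query(User).order_by(User.id).slice(20, 30)

    # a single bound leaves the other clause unset:
    # slice(None, 5) renders only LIMIT 5; slice(5, None) only OFFSET 5
    first_five = session.query(User).order_by(User.id).slice(None, 5)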
@_generative(_no_statement_condition) def limit(self, limit): 'Apply a ``LIMIT`` to the query and return the newly resulting\n ``Query``.\n\n ' self._limit = limit
5,282,936,834,975,429,000
Apply a ``LIMIT`` to the query and return the newly resulting ``Query``.
lib/sqlalchemy/orm/query.py
limit
slafs/sqlalchemy
python
@_generative(_no_statement_condition) def limit(self, limit): 'Apply a ``LIMIT`` to the query and return the newly resulting\n ``Query``.\n\n ' self._limit = limit
@_generative(_no_statement_condition) def offset(self, offset): 'Apply an ``OFFSET`` to the query and return the newly resulting\n ``Query``.\n\n ' self._offset = offset
-2,520,904,459,605,291,000
Apply an ``OFFSET`` to the query and return the newly resulting ``Query``.
lib/sqlalchemy/orm/query.py
offset
slafs/sqlalchemy
python
@_generative(_no_statement_condition) def offset(self, offset): 'Apply an ``OFFSET`` to the query and return the newly resulting\n ``Query``.\n\n ' self._offset = offset
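Taken together, ``limit()`` and ``offset()`` spell out what ``slice()`` computes; a hedged pagination sketch (the page-size constant and helper are illustrative, not part of the API)::

    PAGE_SIZE = 25

    def page(query, number):
        # page numbers start at 1; a deterministic ORDER BY is needed
        # for stable pages across requests
        return query.limit(PAGE_SIZE).offset((number - 1) * PAGE_SIZE)

    third_page = page(session.query(User).order_by(User.id), 3).all()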
@_generative(_no_statement_condition) def distinct(self, *criterion): 'Apply a ``DISTINCT`` to the query and return the newly resulting\n ``Query``.\n\n :param \\*criterion: optional column expressions. When present,\n the Postgresql dialect will render a ``DISTINCT ON (<expressions>)``\n construct.\n\n ' if (not criterion): self._distinct = True else: criterion = self._adapt_col_list(criterion) if isinstance(self._distinct, list): self._distinct += criterion else: self._distinct = criterion
-8,107,857,850,585,034,000
Apply a ``DISTINCT`` to the query and return the newly resulting ``Query``. :param \*criterion: optional column expressions. When present, the Postgresql dialect will render a ``DISTINCT ON (<expressions>)`` construct.
lib/sqlalchemy/orm/query.py
distinct
slafs/sqlalchemy
python
@_generative(_no_statement_condition) def distinct(self, *criterion): 'Apply a ``DISTINCT`` to the query and return the newly resulting\n ``Query``.\n\n :param \\*criterion: optional column expressions. When present,\n the Postgresql dialect will render a ``DISTINCT ON (<expressions>)``\n construct.\n\n ' if (not criterion): self._distinct = True else: criterion = self._adapt_col_list(criterion) if isinstance(self._distinct, list): self._distinct += criterion else: self._distinct = criterion
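A sketch of the plain form next to the Postgresql-only ``DISTINCT ON`` form, with assumed column names::

    # plain DISTINCT across the selected columns, on any backend
    names = session.query(User.name).distinct()

    # on the Postgresql dialect, passing expressions renders
    # SELECT DISTINCT ON ("user".name) ...; Postgresql expects the
    # ORDER BY to lead with the same expressions
    q = session.query(User.name, User.id).\
        distinct(User.name).\
        order_by(User.name, User.id)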
@_generative() def prefix_with(self, *prefixes): "Apply the prefixes to the query and return the newly resulting\n ``Query``.\n\n :param \\*prefixes: optional prefixes, typically strings,\n not using any commas. This is particularly useful for MySQL keywords.\n\n e.g.::\n\n query = sess.query(User.name).\\\n prefix_with('HIGH_PRIORITY').\\\n prefix_with('SQL_SMALL_RESULT', 'ALL')\n\n Would render::\n\n SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name\n FROM users\n\n .. versionadded:: 0.7.7\n\n " if self._prefixes: self._prefixes += prefixes else: self._prefixes = prefixes
5,733,379,320,651,781,000
Apply the prefixes to the query and return the newly resulting ``Query``. :param \*prefixes: optional prefixes, typically strings, not using any commas. This is particularly useful for MySQL keywords. e.g.:: query = sess.query(User.name).\ prefix_with('HIGH_PRIORITY').\ prefix_with('SQL_SMALL_RESULT', 'ALL') Would render:: SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name FROM users .. versionadded:: 0.7.7
lib/sqlalchemy/orm/query.py
prefix_with
slafs/sqlalchemy
python
@_generative() def prefix_with(self, *prefixes): "Apply the prefixes to the query and return the newly resulting\n ``Query``.\n\n :param \\*prefixes: optional prefixes, typically strings,\n not using any commas. This is particularly useful for MySQL keywords.\n\n e.g.::\n\n query = sess.query(User.name).\\\n prefix_with('HIGH_PRIORITY').\\\n prefix_with('SQL_SMALL_RESULT', 'ALL')\n\n Would render::\n\n SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name\n FROM users\n\n .. versionadded:: 0.7.7\n\n " if self._prefixes: self._prefixes += prefixes else: self._prefixes = prefixes
def all(self): 'Return the results represented by this ``Query`` as a list.\n\n This results in an execution of the underlying query.\n\n ' return list(self)
4,352,606,581,994,707,000
Return the results represented by this ``Query`` as a list. This results in an execution of the underlying query.
lib/sqlalchemy/orm/query.py
all
slafs/sqlalchemy
python
def all(self): 'Return the results represented by this ``Query`` as a list.\n\n This results in an execution of the underlying query.\n\n ' return list(self)
@_generative(_no_clauseelement_condition) def from_statement(self, statement): 'Execute the given SELECT statement and return results.\n\n This method bypasses all internal statement compilation, and the\n statement is executed without modification.\n\n The statement is typically either a :func:`~.expression.text`\n or :func:`~.expression.select` construct, and should return the set\n of columns\n appropriate to the entity class represented by this :class:`.Query`.\n\n .. seealso::\n\n :ref:`orm_tutorial_literal_sql` - usage examples in the\n ORM tutorial\n\n ' statement = expression._expression_literal_as_text(statement) if (not isinstance(statement, (expression.TextClause, expression.SelectBase))): raise sa_exc.ArgumentError('from_statement accepts text(), select(), and union() objects only.') self._statement = statement
7,552,317,356,849,819,000
Execute the given SELECT statement and return results. This method bypasses all internal statement compilation, and the statement is executed without modification. The statement is typically either a :func:`~.expression.text` or :func:`~.expression.select` construct, and should return the set of columns appropriate to the entity class represented by this :class:`.Query`. .. seealso:: :ref:`orm_tutorial_literal_sql` - usage examples in the ORM tutorial
lib/sqlalchemy/orm/query.py
from_statement
slafs/sqlalchemy
python
@_generative(_no_clauseelement_condition) def from_statement(self, statement): 'Execute the given SELECT statement and return results.\n\n This method bypasses all internal statement compilation, and the\n statement is executed without modification.\n\n The statement is typically either a :func:`~.expression.text`\n or :func:`~.expression.select` construct, and should return the set\n of columns\n appropriate to the entity class represented by this :class:`.Query`.\n\n .. seealso::\n\n :ref:`orm_tutorial_literal_sql` - usage examples in the\n ORM tutorial\n\n ' statement = expression._expression_literal_as_text(statement) if (not isinstance(statement, (expression.TextClause, expression.SelectBase))): raise sa_exc.ArgumentError('from_statement accepts text(), select(), and union() objects only.') self._statement = statement
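A minimal use of :meth:`.Query.from_statement` with a :func:`~.expression.text` construct, reusing the assumed ``user`` table from the earlier sketches; the raw SQL string here is an illustration only::

    from sqlalchemy import text

    # column names in the raw statement line up with the mapped columns
    stmt = text("SELECT id, name FROM user WHERE name = :name")
    eds = session.query(User).\
        from_statement(stmt).\
        params(name='ed').all()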
def first(self): "Return the first result of this ``Query`` or\n None if the result doesn't contain any row.\n\n first() applies a limit of one within the generated SQL, so that\n only one primary entity row is generated on the server side\n (note this may consist of multiple result rows if join-loaded\n collections are present).\n\n Calling ``first()`` results in an execution of the underlying query.\n\n " if (self._statement is not None): ret = list(self)[0:1] else: ret = list(self[0:1]) if (len(ret) > 0): return ret[0] else: return None
-1,583,235,085,555,168,300
Return the first result of this ``Query`` or None if the result doesn't contain any row. first() applies a limit of one within the generated SQL, so that only one primary entity row is generated on the server side (note this may consist of multiple result rows if join-loaded collections are present). Calling ``first()`` results in an execution of the underlying query.
lib/sqlalchemy/orm/query.py
first
slafs/sqlalchemy
python
def first(self): "Return the first result of this ``Query`` or\n None if the result doesn't contain any row.\n\n first() applies a limit of one within the generated SQL, so that\n only one primary entity row is generated on the server side\n (note this may consist of multiple result rows if join-loaded\n collections are present).\n\n Calling ``first()`` results in an execution of the underlying query.\n\n " if (self._statement is not None): ret = list(self)[0:1] else: ret = list(self[0:1]) if (len(ret) > 0): return ret[0] else: return None
def one(self): 'Return exactly one result or raise an exception.\n\n Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects\n no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``\n if multiple object identities are returned, or if multiple\n rows are returned for a query that does not return object\n identities.\n\n Note that an entity query, that is, one which selects one or\n more mapped classes as opposed to individual column attributes,\n may ultimately represent many rows but only one row of\n unique entity or entities - this is a successful result for one().\n\n Calling ``one()`` results in an execution of the underlying query.\n\n .. versionchanged:: 0.6\n ``one()`` fully fetches all results instead of applying\n any kind of limit, so that the "unique"-ing of entities does not\n conceal multiple object identities.\n\n ' ret = list(self) l = len(ret) if (l == 1): return ret[0] elif (l == 0): raise orm_exc.NoResultFound('No row was found for one()') else: raise orm_exc.MultipleResultsFound('Multiple rows were found for one()')
2,604,968,758,797,791,000
Return exactly one result or raise an exception. Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound`` if multiple object identities are returned, or if multiple rows are returned for a query that does not return object identities. Note that an entity query, that is, one which selects one or more mapped classes as opposed to individual column attributes, may ultimately represent many rows but only one row of unique entity or entities - this is a successful result for one(). Calling ``one()`` results in an execution of the underlying query. .. versionchanged:: 0.6 ``one()`` fully fetches all results instead of applying any kind of limit, so that the "unique"-ing of entities does not conceal multiple object identities.
lib/sqlalchemy/orm/query.py
one
slafs/sqlalchemy
python
def one(self): 'Return exactly one result or raise an exception.\n\n Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects\n no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``\n if multiple object identities are returned, or if multiple\n rows are returned for a query that does not return object\n identities.\n\n Note that an entity query, that is, one which selects one or\n more mapped classes as opposed to individual column attributes,\n may ultimately represent many rows but only one row of\n unique entity or entities - this is a successful result for one().\n\n Calling ``one()`` results in an execution of the underlying query.\n\n .. versionchanged:: 0.6\n ``one()`` fully fetches all results instead of applying\n any kind of limit, so that the "unique"-ing of entities does not\n conceal multiple object identities.\n\n ' ret = list(self) l = len(ret) if (l == 1): return ret[0] elif (l == 0): raise orm_exc.NoResultFound('No row was found for one()') else: raise orm_exc.MultipleResultsFound('Multiple rows were found for one()')
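Since ``one()`` reports both the zero-row and the many-row case by raising, callers typically guard it; a small sketch against the assumed mapping::

    from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound

    try:
        ed = session.query(User).filter(User.name == 'ed').one()
    except NoResultFound:
        ed = None                    # no row matched at all
    except MultipleResultsFound:
        # more than one distinct identity came back
        raise ValueError("expected user names to be unique")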
def scalar(self): 'Return the first element of the first result or None\n if no rows present. If multiple rows are returned,\n raises MultipleResultsFound.\n\n >>> session.query(Item).scalar()\n <Item>\n >>> session.query(Item.id).scalar()\n 1\n >>> session.query(Item.id).filter(Item.id < 0).scalar()\n None\n >>> session.query(Item.id, Item.name).scalar()\n 1\n >>> session.query(func.count(Parent.id)).scalar()\n 20\n\n This results in an execution of the underlying query.\n\n ' try: ret = self.one() if (not isinstance(ret, tuple)): return ret return ret[0] except orm_exc.NoResultFound: return None
-7,254,197,736,888,337,000
Return the first element of the first result or None if no rows present. If multiple rows are returned, raises MultipleResultsFound. >>> session.query(Item).scalar() <Item> >>> session.query(Item.id).scalar() 1 >>> session.query(Item.id).filter(Item.id < 0).scalar() None >>> session.query(Item.id, Item.name).scalar() 1 >>> session.query(func.count(Parent.id)).scalar() 20 This results in an execution of the underlying query.
lib/sqlalchemy/orm/query.py
scalar
slafs/sqlalchemy
python
def scalar(self): 'Return the first element of the first result or None\n if no rows present. If multiple rows are returned,\n raises MultipleResultsFound.\n\n >>> session.query(Item).scalar()\n <Item>\n >>> session.query(Item.id).scalar()\n 1\n >>> session.query(Item.id).filter(Item.id < 0).scalar()\n None\n >>> session.query(Item.id, Item.name).scalar()\n 1\n >>> session.query(func.count(Parent.id)).scalar()\n 20\n\n This results in an execution of the underlying query.\n\n ' try: ret = self.one() if (not isinstance(ret, tuple)): return ret return ret[0] except orm_exc.NoResultFound: return None
@property def column_descriptions(self): "Return metadata about the columns which would be\n returned by this :class:`.Query`.\n\n Format is a list of dictionaries::\n\n user_alias = aliased(User, name='user2')\n q = sess.query(User, User.id, user_alias)\n\n # this expression:\n q.column_descriptions\n\n # would return:\n [\n {\n 'name':'User',\n 'type':User,\n 'aliased':False,\n 'expr':User,\n },\n {\n 'name':'id',\n 'type':Integer(),\n 'aliased':False,\n 'expr':User.id,\n },\n {\n 'name':'user2',\n 'type':User,\n 'aliased':True,\n 'expr':user_alias\n }\n ]\n\n " return [{'name': ent._label_name, 'type': ent.type, 'aliased': getattr(ent, 'is_aliased_class', False), 'expr': ent.expr} for ent in self._entities]
4,443,220,158,900,704,000
Return metadata about the columns which would be returned by this :class:`.Query`. Format is a list of dictionaries:: user_alias = aliased(User, name='user2') q = sess.query(User, User.id, user_alias) # this expression: q.column_descriptions # would return: [ { 'name':'User', 'type':User, 'aliased':False, 'expr':User, }, { 'name':'id', 'type':Integer(), 'aliased':False, 'expr':User.id, }, { 'name':'user2', 'type':User, 'aliased':True, 'expr':user_alias } ]
lib/sqlalchemy/orm/query.py
column_descriptions
slafs/sqlalchemy
python
@property def column_descriptions(self): "Return metadata about the columns which would be\n returned by this :class:`.Query`.\n\n Format is a list of dictionaries::\n\n user_alias = aliased(User, name='user2')\n q = sess.query(User, User.id, user_alias)\n\n # this expression:\n q.column_descriptions\n\n # would return:\n [\n {\n 'name':'User',\n 'type':User,\n 'aliased':False,\n 'expr':User,\n },\n {\n 'name':'id',\n 'type':Integer(),\n 'aliased':False,\n 'expr':User.id,\n },\n {\n 'name':'user2',\n 'type':User,\n 'aliased':True,\n 'expr':user_alias\n }\n ]\n\n " return [{'name': ent._label_name, 'type': ent.type, 'aliased': getattr(ent, 'is_aliased_class', False), 'expr': ent.expr} for ent in self._entities]
def instances(self, cursor, __context=None): 'Given a ResultProxy cursor as returned by connection.execute(),\n return an ORM result as an iterator.\n\n e.g.::\n\n result = engine.execute("select * from users")\n for u in session.query(User).instances(result):\n print u\n ' context = __context if (context is None): context = QueryContext(self) return loading.instances(self, cursor, context)
214,403,814,212,797,630
Given a ResultProxy cursor as returned by connection.execute(), return an ORM result as an iterator. e.g.:: result = engine.execute("select * from users") for u in session.query(User).instances(result): print u
lib/sqlalchemy/orm/query.py
instances
slafs/sqlalchemy
python
def instances(self, cursor, __context=None): 'Given a ResultProxy cursor as returned by connection.execute(),\n return an ORM result as an iterator.\n\n e.g.::\n\n result = engine.execute("select * from users")\n for u in session.query(User).instances(result):\n print u\n ' context = __context if (context is None): context = QueryContext(self) return loading.instances(self, cursor, context)
def merge_result(self, iterator, load=True): "Merge a result into this :class:`.Query` object's Session.\n\n Given an iterator returned by a :class:`.Query` of the same structure\n as this one, return an identical iterator of results, with all mapped\n instances merged into the session using :meth:`.Session.merge`. This\n is an optimized method which will merge all mapped instances,\n preserving the structure of the result rows and unmapped columns with\n less method overhead than that of calling :meth:`.Session.merge`\n explicitly for each value.\n\n The structure of the results is determined based on the column list of\n this :class:`.Query` - if these do not correspond, unchecked errors\n will occur.\n\n The 'load' argument is the same as that of :meth:`.Session.merge`.\n\n For an example of how :meth:`~.Query.merge_result` is used, see\n the source code for the example :ref:`examples_caching`, where\n :meth:`~.Query.merge_result` is used to efficiently restore state\n from a cache back into a target :class:`.Session`.\n\n " return loading.merge_result(self, iterator, load)
-3,701,953,095,847,633,000
Merge a result into this :class:`.Query` object's Session. Given an iterator returned by a :class:`.Query` of the same structure as this one, return an identical iterator of results, with all mapped instances merged into the session using :meth:`.Session.merge`. This is an optimized method which will merge all mapped instances, preserving the structure of the result rows and unmapped columns with less method overhead than that of calling :meth:`.Session.merge` explicitly for each value. The structure of the results is determined based on the column list of this :class:`.Query` - if these do not correspond, unchecked errors will occur. The 'load' argument is the same as that of :meth:`.Session.merge`. For an example of how :meth:`~.Query.merge_result` is used, see the source code for the example :ref:`examples_caching`, where :meth:`~.Query.merge_result` is used to efficiently restore state from a cache back into a target :class:`.Session`.
lib/sqlalchemy/orm/query.py
merge_result
slafs/sqlalchemy
python
def merge_result(self, iterator, load=True): "Merge a result into this :class:`.Query` object's Session.\n\n Given an iterator returned by a :class:`.Query` of the same structure\n as this one, return an identical iterator of results, with all mapped\n instances merged into the session using :meth:`.Session.merge`. This\n is an optimized method which will merge all mapped instances,\n preserving the structure of the result rows and unmapped columns with\n less method overhead than that of calling :meth:`.Session.merge`\n explicitly for each value.\n\n The structure of the results is determined based on the column list of\n this :class:`.Query` - if these do not correspond, unchecked errors\n will occur.\n\n The 'load' argument is the same as that of :meth:`.Session.merge`.\n\n For an example of how :meth:`~.Query.merge_result` is used, see\n the source code for the example :ref:`examples_caching`, where\n :meth:`~.Query.merge_result` is used to efficiently restore state\n from a cache back into a target :class:`.Session`.\n\n " return loading.merge_result(self, iterator, load)
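A hedged sketch of the cache-and-restore pattern the docstring alludes to; the ``cache`` dict, key, and helper are illustrative assumptions, not the actual :ref:`examples_caching` recipe::

    cache = {}

    def cached_all(query, key):
        # run the query once, keep the rows, and on later calls merge
        # the cached instances into the query's current Session;
        # load=False skips attribute reconciliation and assumes the
        # cached objects were never modified
        if key not in cache:
            cache[key] = query.all()
        return list(query.merge_result(cache[key], load=False))

    users = cached_all(session.query(User).order_by(User.id), 'all_users')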
def exists(self): "A convenience method that turns a query into an EXISTS subquery\n of the form EXISTS (SELECT 1 FROM ... WHERE ...).\n\n e.g.::\n\n q = session.query(User).filter(User.name == 'fred')\n session.query(q.exists())\n\n Producing SQL similar to::\n\n SELECT EXISTS (\n SELECT 1 FROM users WHERE users.name = :name_1\n ) AS anon_1\n\n The EXISTS construct is usually used in the WHERE clause::\n\n session.query(User.id).filter(q.exists()).scalar()\n\n Note that some databases such as SQL Server don't allow an\n EXISTS expression to be present in the columns clause of a\n SELECT. To select a simple boolean value based on the exists\n as a WHERE, use :func:`.literal`::\n\n from sqlalchemy import literal\n\n session.query(literal(True)).filter(q.exists()).scalar()\n\n .. versionadded:: 0.8.1\n\n " return sql.exists(self.add_columns('1').with_labels().statement.with_only_columns([1]))
2,540,586,680,894,167,000
A convenience method that turns a query into an EXISTS subquery of the form EXISTS (SELECT 1 FROM ... WHERE ...). e.g.:: q = session.query(User).filter(User.name == 'fred') session.query(q.exists()) Producing SQL similar to:: SELECT EXISTS ( SELECT 1 FROM users WHERE users.name = :name_1 ) AS anon_1 The EXISTS construct is usually used in the WHERE clause:: session.query(User.id).filter(q.exists()).scalar() Note that some databases such as SQL Server don't allow an EXISTS expression to be present in the columns clause of a SELECT. To select a simple boolean value based on the exists as a WHERE, use :func:`.literal`:: from sqlalchemy import literal session.query(literal(True)).filter(q.exists()).scalar() .. versionadded:: 0.8.1
lib/sqlalchemy/orm/query.py
exists
slafs/sqlalchemy
python
def exists(self): "A convenience method that turns a query into an EXISTS subquery\n of the form EXISTS (SELECT 1 FROM ... WHERE ...).\n\n e.g.::\n\n q = session.query(User).filter(User.name == 'fred')\n session.query(q.exists())\n\n Producing SQL similar to::\n\n SELECT EXISTS (\n SELECT 1 FROM users WHERE users.name = :name_1\n ) AS anon_1\n\n The EXISTS construct is usually used in the WHERE clause::\n\n session.query(User.id).filter(q.exists()).scalar()\n\n Note that some databases such as SQL Server don't allow an\n EXISTS expression to be present in the columns clause of a\n SELECT. To select a simple boolean value based on the exists\n as a WHERE, use :func:`.literal`::\n\n from sqlalchemy import literal\n\n session.query(literal(True)).filter(q.exists()).scalar()\n\n .. versionadded:: 0.8.1\n\n " return sql.exists(self.add_columns('1').with_labels().statement.with_only_columns([1]))
def count(self): 'Return a count of rows this Query would return.\n\n This generates the SQL for this Query as follows::\n\n SELECT count(*) AS count_1 FROM (\n SELECT <rest of query follows...>\n ) AS anon_1\n\n .. versionchanged:: 0.7\n The above scheme is newly refined as of 0.7b3.\n\n For fine grained control over specific columns\n to count, to skip the usage of a subquery or\n otherwise control of the FROM clause,\n or to use other aggregate functions,\n use :attr:`~sqlalchemy.sql.expression.func`\n expressions in conjunction\n with :meth:`~.Session.query`, i.e.::\n\n from sqlalchemy import func\n\n # count User records, without\n # using a subquery.\n session.query(func.count(User.id))\n\n # return count of user "id" grouped\n # by "name"\n session.query(func.count(User.id)).\\\n group_by(User.name)\n\n from sqlalchemy import distinct\n\n # count distinct "name" values\n session.query(func.count(distinct(User.name)))\n\n ' col = sql.func.count(sql.literal_column('*')) return self.from_self(col).scalar()
1,003,972,134,544,188,200
Return a count of rows this Query would return. This generates the SQL for this Query as follows:: SELECT count(*) AS count_1 FROM ( SELECT <rest of query follows...> ) AS anon_1 .. versionchanged:: 0.7 The above scheme is newly refined as of 0.7b3. For fine grained control over specific columns to count, to skip the usage of a subquery or otherwise control of the FROM clause, or to use other aggregate functions, use :attr:`~sqlalchemy.sql.expression.func` expressions in conjunction with :meth:`~.Session.query`, i.e.:: from sqlalchemy import func # count User records, without # using a subquery. session.query(func.count(User.id)) # return count of user "id" grouped # by "name" session.query(func.count(User.id)).\ group_by(User.name) from sqlalchemy import distinct # count distinct "name" values session.query(func.count(distinct(User.name)))
lib/sqlalchemy/orm/query.py
count
slafs/sqlalchemy
python
def count(self): 'Return a count of rows this Query would return.\n\n This generates the SQL for this Query as follows::\n\n SELECT count(*) AS count_1 FROM (\n SELECT <rest of query follows...>\n ) AS anon_1\n\n .. versionchanged:: 0.7\n The above scheme is newly refined as of 0.7b3.\n\n For fine grained control over specific columns\n to count, to skip the usage of a subquery or\n otherwise control of the FROM clause,\n or to use other aggregate functions,\n use :attr:`~sqlalchemy.sql.expression.func`\n expressions in conjunction\n with :meth:`~.Session.query`, i.e.::\n\n from sqlalchemy import func\n\n # count User records, without\n # using a subquery.\n session.query(func.count(User.id))\n\n # return count of user "id" grouped\n # by "name"\n session.query(func.count(User.id)).\\\n group_by(User.name)\n\n from sqlalchemy import distinct\n\n # count distinct "name" values\n session.query(func.count(distinct(User.name)))\n\n ' col = sql.func.count(sql.literal_column('*')) return self.from_self(col).scalar()
def delete(self, synchronize_session='evaluate'): 'Perform a bulk delete query.\n\n Deletes rows matched by this query from the database.\n\n :param synchronize_session: chooses the strategy for the removal of\n matched objects from the session. Valid values are:\n\n ``False`` - don\'t synchronize the session. This option is the most\n efficient and is reliable once the session is expired, which\n typically occurs after a commit(), or explicitly using\n expire_all(). Before the expiration, objects may still remain in\n the session which were in fact deleted which can lead to confusing\n results if they are accessed via get() or already loaded\n collections.\n\n ``\'fetch\'`` - performs a select query before the delete to find\n objects that are matched by the delete query and need to be\n removed from the session. Matched objects are removed from the\n session.\n\n ``\'evaluate\'`` - Evaluate the query\'s criteria in Python straight\n on the objects in the session. If evaluation of the criteria isn\'t\n implemented, an error is raised. In that case you probably\n want to use the \'fetch\' strategy as a fallback.\n\n The expression evaluator currently doesn\'t account for differing\n string collations between the database and Python.\n\n :return: the count of rows matched as returned by the database\'s\n "row count" feature.\n\n This method has several key caveats:\n\n * The method does **not** offer in-Python cascading of relationships\n - it is assumed that ON DELETE CASCADE/SET NULL/etc. is configured\n for any foreign key references which require it, otherwise the\n database may emit an integrity violation if foreign key references\n are being enforced.\n\n After the DELETE, dependent objects in the :class:`.Session` which\n were impacted by an ON DELETE may not contain the current\n state, or may have been deleted. This issue is resolved once the\n :class:`.Session` is expired,\n which normally occurs upon :meth:`.Session.commit` or can be forced\n by using :meth:`.Session.expire_all`. Accessing an expired object\n whose row has been deleted will invoke a SELECT to locate the\n row; when the row is not found, an\n :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.\n\n * The :meth:`.MapperEvents.before_delete` and\n :meth:`.MapperEvents.after_delete`\n events are **not** invoked from this method. Instead, the\n :meth:`.SessionEvents.after_bulk_delete` method is provided to act\n upon a mass DELETE of entity rows.\n\n .. seealso::\n\n :meth:`.Query.update`\n\n :ref:`inserts_and_updates` - Core SQL tutorial\n\n ' delete_op = persistence.BulkDelete.factory(self, synchronize_session) delete_op.exec_() return delete_op.rowcount
8,814,820,009,756,160,000
Perform a bulk delete query. Deletes rows matched by this query from the database. :param synchronize_session: chooses the strategy for the removal of matched objects from the session. Valid values are: ``False`` - don't synchronize the session. This option is the most efficient and is reliable once the session is expired, which typically occurs after a commit(), or explicitly using expire_all(). Before the expiration, objects may still remain in the session which were in fact deleted which can lead to confusing results if they are accessed via get() or already loaded collections. ``'fetch'`` - performs a select query before the delete to find objects that are matched by the delete query and need to be removed from the session. Matched objects are removed from the session. ``'evaluate'`` - Evaluate the query's criteria in Python straight on the objects in the session. If evaluation of the criteria isn't implemented, an error is raised. In that case you probably want to use the 'fetch' strategy as a fallback. The expression evaluator currently doesn't account for differing string collations between the database and Python. :return: the count of rows matched as returned by the database's "row count" feature. This method has several key caveats: * The method does **not** offer in-Python cascading of relationships - it is assumed that ON DELETE CASCADE/SET NULL/etc. is configured for any foreign key references which require it, otherwise the database may emit an integrity violation if foreign key references are being enforced. After the DELETE, dependent objects in the :class:`.Session` which were impacted by an ON DELETE may not contain the current state, or may have been deleted. This issue is resolved once the :class:`.Session` is expired, which normally occurs upon :meth:`.Session.commit` or can be forced by using :meth:`.Session.expire_all`. Accessing an expired object whose row has been deleted will invoke a SELECT to locate the row; when the row is not found, an :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised. * The :meth:`.MapperEvents.before_delete` and :meth:`.MapperEvents.after_delete` events are **not** invoked from this method. Instead, the :meth:`.SessionEvents.after_bulk_delete` method is provided to act upon a mass DELETE of entity rows. .. seealso:: :meth:`.Query.update` :ref:`inserts_and_updates` - Core SQL tutorial
lib/sqlalchemy/orm/query.py
delete
slafs/sqlalchemy
python
def delete(self, synchronize_session='evaluate'): 'Perform a bulk delete query.\n\n Deletes rows matched by this query from the database.\n\n :param synchronize_session: chooses the strategy for the removal of\n matched objects from the session. Valid values are:\n\n ``False`` - don\'t synchronize the session. This option is the most\n efficient and is reliable once the session is expired, which\n typically occurs after a commit(), or explicitly using\n expire_all(). Before the expiration, objects may still remain in\n the session which were in fact deleted which can lead to confusing\n results if they are accessed via get() or already loaded\n collections.\n\n ``\'fetch\'`` - performs a select query before the delete to find\n objects that are matched by the delete query and need to be\n removed from the session. Matched objects are removed from the\n session.\n\n ``\'evaluate\'`` - Evaluate the query\'s criteria in Python straight\n on the objects in the session. If evaluation of the criteria isn\'t\n implemented, an error is raised. In that case you probably\n want to use the \'fetch\' strategy as a fallback.\n\n The expression evaluator currently doesn\'t account for differing\n string collations between the database and Python.\n\n :return: the count of rows matched as returned by the database\'s\n "row count" feature.\n\n This method has several key caveats:\n\n * The method does **not** offer in-Python cascading of relationships\n - it is assumed that ON DELETE CASCADE/SET NULL/etc. is configured\n for any foreign key references which require it, otherwise the\n database may emit an integrity violation if foreign key references\n are being enforced.\n\n After the DELETE, dependent objects in the :class:`.Session` which\n were impacted by an ON DELETE may not contain the current\n state, or may have been deleted. This issue is resolved once the\n :class:`.Session` is expired,\n which normally occurs upon :meth:`.Session.commit` or can be forced\n by using :meth:`.Session.expire_all`. Accessing an expired object\n whose row has been deleted will invoke a SELECT to locate the\n row; when the row is not found, an\n :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.\n\n * The :meth:`.MapperEvents.before_delete` and\n :meth:`.MapperEvents.after_delete`\n events are **not** invoked from this method. Instead, the\n :meth:`.SessionEvents.after_bulk_delete` method is provided to act\n upon a mass DELETE of entity rows.\n\n .. seealso::\n\n :meth:`.Query.update`\n\n :ref:`inserts_and_updates` - Core SQL tutorial\n\n ' delete_op = persistence.BulkDelete.factory(self, synchronize_session) delete_op.exec_() return delete_op.rowcount
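A short illustration of the strategies above, against the assumed ``User`` mapping::

    # emits: DELETE FROM "user" WHERE "user".name = :name_1
    # 'fetch' SELECTs the matching rows first so the Session can be purged
    num_deleted = session.query(User).\
        filter(User.name == 'ed').\
        delete(synchronize_session='fetch')

    session.commit()   # expiring on commit also makes the False strategy safe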
def update(self, values, synchronize_session='evaluate'): 'Perform a bulk update query.\n\n Updates rows matched by this query in the database.\n\n E.g.::\n\n sess.query(User).filter(User.age == 25).\\\n update({User.age: User.age - 10}, synchronize_session=\'fetch\')\n\n\n sess.query(User).filter(User.age == 25).\\\n update({"age": User.age - 10}, synchronize_session=\'evaluate\')\n\n\n :param values: a dictionary with attribute names, or alternatively\n mapped attributes or SQL expressions, as keys, and literal\n values or sql expressions as values.\n\n .. versionchanged:: 1.0.0 - string names in the values dictionary\n are now resolved against the mapped entity; previously, these\n strings were passed as literal column names with no mapper-level\n translation.\n\n :param synchronize_session: chooses the strategy to update the\n attributes on objects in the session. Valid values are:\n\n ``False`` - don\'t synchronize the session. This option is the most\n efficient and is reliable once the session is expired, which\n typically occurs after a commit(), or explicitly using\n expire_all(). Before the expiration, updated objects may still\n remain in the session with stale values on their attributes, which\n can lead to confusing results.\n\n ``\'fetch\'`` - performs a select query before the update to find\n objects that are matched by the update query. The updated\n attributes are expired on matched objects.\n\n ``\'evaluate\'`` - Evaluate the Query\'s criteria in Python straight\n on the objects in the session. If evaluation of the criteria isn\'t\n implemented, an exception is raised.\n\n The expression evaluator currently doesn\'t account for differing\n string collations between the database and Python.\n\n :return: the count of rows matched as returned by the database\'s\n "row count" feature.\n\n This method has several key caveats:\n\n * The method does **not** offer in-Python cascading of relationships\n - it is assumed that ON UPDATE CASCADE is configured for any foreign\n key references which require it, otherwise the database may emit an\n integrity violation if foreign key references are being enforced.\n\n After the UPDATE, dependent objects in the :class:`.Session` which\n were impacted by an ON UPDATE CASCADE may not contain the current\n state; this issue is resolved once the :class:`.Session` is expired,\n which normally occurs upon :meth:`.Session.commit` or can be forced\n by using :meth:`.Session.expire_all`.\n\n * The method supports multiple table updates, as\n detailed in :ref:`multi_table_updates`, and this behavior does\n extend to support updates of joined-inheritance and other multiple\n table mappings. However, the **join condition of an inheritance\n mapper is currently not automatically rendered**.\n Care must be taken in any multiple-table update to explicitly\n include the joining condition between those tables, even in mappings\n where this is normally automatic.\n E.g. if a class ``Engineer`` subclasses ``Employee``, an UPDATE of\n the ``Engineer`` local table using criteria against the ``Employee``\n local table might look like::\n\n session.query(Engineer).\\\n filter(Engineer.id == Employee.id).\\\n filter(Employee.name == \'dilbert\').\\\n update({"engineer_type": "programmer"})\n\n * The :meth:`.MapperEvents.before_update` and\n :meth:`.MapperEvents.after_update`\n events are **not** invoked from this method. Instead, the\n :meth:`.SessionEvents.after_bulk_update` method is provided to act\n upon a mass UPDATE of entity rows.\n\n .. seealso::\n\n :meth:`.Query.delete`\n\n :ref:`inserts_and_updates` - Core SQL tutorial\n\n ' update_op = persistence.BulkUpdate.factory(self, synchronize_session, values) update_op.exec_() return update_op.rowcount
-7,350,619,407,197,641,000
Perform a bulk update query. Updates rows matched by this query in the database. E.g.:: sess.query(User).filter(User.age == 25).\ update({User.age: User.age - 10}, synchronize_session='fetch') sess.query(User).filter(User.age == 25).\ update({"age": User.age - 10}, synchronize_session='evaluate') :param values: a dictionary with attribute names, or alternatively mapped attributes or SQL expressions, as keys, and literal values or sql expressions as values. .. versionchanged:: 1.0.0 - string names in the values dictionary are now resolved against the mapped entity; previously, these strings were passed as literal column names with no mapper-level translation. :param synchronize_session: chooses the strategy to update the attributes on objects in the session. Valid values are: ``False`` - don't synchronize the session. This option is the most efficient and is reliable once the session is expired, which typically occurs after a commit(), or explicitly using expire_all(). Before the expiration, updated objects may still remain in the session with stale values on their attributes, which can lead to confusing results. ``'fetch'`` - performs a select query before the update to find objects that are matched by the update query. The updated attributes are expired on matched objects. ``'evaluate'`` - Evaluate the Query's criteria in Python straight on the objects in the session. If evaluation of the criteria isn't implemented, an exception is raised. The expression evaluator currently doesn't account for differing string collations between the database and Python. :return: the count of rows matched as returned by the database's "row count" feature. This method has several key caveats: * The method does **not** offer in-Python cascading of relationships - it is assumed that ON UPDATE CASCADE is configured for any foreign key references which require it, otherwise the database may emit an integrity violation if foreign key references are being enforced. After the UPDATE, dependent objects in the :class:`.Session` which were impacted by an ON UPDATE CASCADE may not contain the current state; this issue is resolved once the :class:`.Session` is expired, which normally occurs upon :meth:`.Session.commit` or can be forced by using :meth:`.Session.expire_all`. * The method supports multiple table updates, as detailed in :ref:`multi_table_updates`, and this behavior does extend to support updates of joined-inheritance and other multiple table mappings. However, the **join condition of an inheritance mapper is currently not automatically rendered**. Care must be taken in any multiple-table update to explicitly include the joining condition between those tables, even in mappings where this is normally automatic. E.g. if a class ``Engineer`` subclasses ``Employee``, an UPDATE of the ``Engineer`` local table using criteria against the ``Employee`` local table might look like:: session.query(Engineer).\ filter(Engineer.id == Employee.id).\ filter(Employee.name == 'dilbert').\ update({"engineer_type": "programmer"}) * The :meth:`.MapperEvents.before_update` and :meth:`.MapperEvents.after_update` events are **not** invoked from this method. Instead, the :meth:`.SessionEvents.after_bulk_update` method is provided to act upon a mass UPDATE of entity rows. .. seealso:: :meth:`.Query.delete` :ref:`inserts_and_updates` - Core SQL tutorial
lib/sqlalchemy/orm/query.py
update
slafs/sqlalchemy
python
def update(self, values, synchronize_session='evaluate'): 'Perform a bulk update query.\n\n Updates rows matched by this query in the database.\n\n E.g.::\n\n sess.query(User).filter(User.age == 25).update({User.age: User.age - 10}, synchronize_session=\'fetch\')\n\n\n sess.query(User).filter(User.age == 25).update({"age": User.age - 10}, synchronize_session=\'evaluate\')\n\n\n :param values: a dictionary with attribute names, or alternatively\n mapped attributes or SQL expressions, as keys, and literal\n values or sql expressions as values.\n\n .. versionchanged:: 1.0.0 - string names in the values dictionary\n are now resolved against the mapped entity; previously, these\n strings were passed as literal column names with no mapper-level\n translation.\n\n :param synchronize_session: chooses the strategy to update the\n attributes on objects in the session. Valid values are:\n\n ``False`` - don\'t synchronize the session. This option is the most\n efficient and is reliable once the session is expired, which\n typically occurs after a commit(), or explicitly using\n expire_all(). Before the expiration, updated objects may still\n remain in the session with stale values on their attributes, which\n can lead to confusing results.\n\n ``\'fetch\'`` - performs a select query before the update to find\n objects that are matched by the update query. The updated\n attributes are expired on matched objects.\n\n ``\'evaluate\'`` - Evaluate the Query\'s criteria in Python straight\n on the objects in the session. If evaluation of the criteria isn\'t\n implemented, an exception is raised.\n\n The expression evaluator currently doesn\'t account for differing\n string collations between the database and Python.\n\n :return: the count of rows matched as returned by the database\'s\n "row count" feature.\n\n This method has several key caveats:\n\n * The method does **not** offer in-Python cascading of relationships\n - it is assumed that ON UPDATE CASCADE is configured for any foreign\n key references which require it, otherwise the database may emit an\n integrity violation if foreign key references are being enforced.\n\n After the UPDATE, dependent objects in the :class:`.Session` which\n were impacted by an ON UPDATE CASCADE may not contain the current\n state; this issue is resolved once the :class:`.Session` is expired,\n which normally occurs upon :meth:`.Session.commit` or can be forced\n by using :meth:`.Session.expire_all`.\n\n * The method supports multiple table updates, as\n detailed in :ref:`multi_table_updates`, and this behavior does\n extend to support updates of joined-inheritance and other multiple\n table mappings. However, the **join condition of an inheritance\n mapper is currently not automatically rendered**.\n Care must be taken in any multiple-table update to explicitly\n include the joining condition between those tables, even in mappings\n where this is normally automatic.\n E.g. if a class ``Engineer`` subclasses ``Employee``, an UPDATE of\n the ``Engineer`` local table using criteria against the ``Employee``\n local table might look like::\n\n session.query(Engineer).\\\n filter(Engineer.id == Employee.id).\\\n filter(Employee.name == \'dilbert\').\\\n update({"engineer_type": "programmer"})\n\n * The :meth:`.MapperEvents.before_update` and\n :meth:`.MapperEvents.after_update`\n events are **not** invoked from this method. Instead, the\n :meth:`.SessionEvents.after_bulk_update` method is provided to act\n upon a mass UPDATE of entity rows.\n\n .. seealso::\n\n :meth:`.Query.delete`\n\n :ref:`inserts_and_updates` - Core SQL tutorial\n\n ' update_op = persistence.BulkUpdate.factory(self, synchronize_session, values) update_op.exec_() return update_op.rowcount
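For context on the record above, a minimal runnable sketch of the three synchronize_session strategies; the User mapping, engine, and data here are invented for illustration and are not part of the record:

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String(50))
    age = Column(Integer)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = Session(bind=engine)
session.add_all([User(name='a', age=25), User(name='b', age=40)])
session.commit()

# 'evaluate': the criteria are re-run in Python against in-session
# objects, so matched instances see the new value immediately.
matched = session.query(User).filter(User.age == 25).update(
    {"age": User.age - 10}, synchronize_session='evaluate')

# 'fetch': a SELECT locates matched rows first, then the updated
# attributes are expired on any corresponding in-session objects.
session.query(User).filter(User.age == 40).update(
    {User.age: User.age - 10}, synchronize_session='fetch')

# False: no synchronization; expire manually to avoid stale values.
session.query(User).filter(User.age > 0).update(
    {"age": User.age + 1}, synchronize_session=False)
session.expire_all()
print(matched)  # row count reported by the database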
def _adjust_for_single_inheritance(self, context): 'Apply single-table-inheritance filtering.\n\n For all distinct single-table-inheritance mappers represented in\n the columns clause of this query, add criterion to the WHERE\n clause of the given QueryContext such that only the appropriate\n subtypes are selected from the total results.\n\n ' for (ext_info, adapter) in set(self._mapper_adapter_map.values()): if (ext_info in self._join_entities): continue single_crit = ext_info.mapper._single_table_criterion if (single_crit is not None): if adapter: single_crit = adapter.traverse(single_crit) single_crit = self._adapt_clause(single_crit, False, False) context.whereclause = sql.and_(sql.True_._ifnone(context.whereclause), single_crit)
-7,571,517,035,935,323,000
Apply single-table-inheritance filtering. For all distinct single-table-inheritance mappers represented in the columns clause of this query, add criterion to the WHERE clause of the given QueryContext such that only the appropriate subtypes are selected from the total results.
lib/sqlalchemy/orm/query.py
_adjust_for_single_inheritance
slafs/sqlalchemy
python
def _adjust_for_single_inheritance(self, context): 'Apply single-table-inheritance filtering.\n\n For all distinct single-table-inheritance mappers represented in\n the columns clause of this query, add criterion to the WHERE\n clause of the given QueryContext such that only the appropriate\n subtypes are selected from the total results.\n\n ' for (ext_info, adapter) in set(self._mapper_adapter_map.values()): if (ext_info in self._join_entities): continue single_crit = ext_info.mapper._single_table_criterion if (single_crit is not None): if adapter: single_crit = adapter.traverse(single_crit) single_crit = self._adapt_clause(single_crit, False, False) context.whereclause = sql.and_(sql.True_._ifnone(context.whereclause), single_crit)
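The single-table-inheritance filtering performed above can be observed with a toy mapping; every name below (Employee, Engineer, the sqlite engine) is assumed for illustration:

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session

Base = declarative_base()

class Employee(Base):
    __tablename__ = 'employee'
    id = Column(Integer, primary_key=True)
    type = Column(String(20))
    __mapper_args__ = {'polymorphic_on': type,
                       'polymorphic_identity': 'employee'}

class Engineer(Employee):
    # Single-table inheritance: no __tablename__, shares "employee".
    __mapper_args__ = {'polymorphic_identity': 'engineer'}

session = Session(bind=create_engine('sqlite://'))
# Querying the subclass triggers the step above, which appends the
# discriminator criterion: ... WHERE employee.type IN (:type_1)
print(session.query(Engineer))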
def set_with_polymorphic(self, query, cls_or_mappers, selectable, polymorphic_on): "Receive an update from a call to query.with_polymorphic().\n\n Note the newer style of using a free standing with_polymporphic()\n construct doesn't make use of this method.\n\n\n " if self.is_aliased_class: raise NotImplementedError("Can't use with_polymorphic() against an Aliased object") if (cls_or_mappers is None): query._reset_polymorphic_adapter(self.mapper) return (mappers, from_obj) = self.mapper._with_polymorphic_args(cls_or_mappers, selectable) self._with_polymorphic = mappers self._polymorphic_discriminator = polymorphic_on self.selectable = from_obj query._mapper_loads_polymorphically_with(self.mapper, sql_util.ColumnAdapter(from_obj, self.mapper._equivalent_columns))
-3,790,119,450,206,022,700
Receive an update from a call to query.with_polymorphic(). Note the newer style of using a free-standing with_polymorphic() construct doesn't make use of this method.
lib/sqlalchemy/orm/query.py
set_with_polymorphic
slafs/sqlalchemy
python
def set_with_polymorphic(self, query, cls_or_mappers, selectable, polymorphic_on): "Receive an update from a call to query.with_polymorphic().\n\n Note the newer style of using a free standing with_polymporphic()\n construct doesn't make use of this method.\n\n\n " if self.is_aliased_class: raise NotImplementedError("Can't use with_polymorphic() against an Aliased object") if (cls_or_mappers is None): query._reset_polymorphic_adapter(self.mapper) return (mappers, from_obj) = self.mapper._with_polymorphic_args(cls_or_mappers, selectable) self._with_polymorphic = mappers self._polymorphic_discriminator = polymorphic_on self.selectable = from_obj query._mapper_loads_polymorphically_with(self.mapper, sql_util.ColumnAdapter(from_obj, self.mapper._equivalent_columns))
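Continuing the toy Employee/Engineer mapping from the previous sketch, the two calling styles mentioned in the docstring look roughly like this; only the first is routed through set_with_polymorphic():

from sqlalchemy.orm import with_polymorphic

# Older style, routed through set_with_polymorphic() above: load
# Employee rows with Engineer columns eagerly selected.
q_old = session.query(Employee).with_polymorphic([Engineer])

# Newer free-standing construct, which bypasses this method entirely.
emp_poly = with_polymorphic(Employee, [Engineer])
q_new = session.query(emp_poly)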
def __init__(self, name, *exprs, **kw): 'Construct a new :class:`.Bundle`.\n\n e.g.::\n\n bn = Bundle("mybundle", MyClass.x, MyClass.y)\n\n for row in session.query(bn).filter(\n bn.c.x == 5).filter(bn.c.y == 4):\n print(row.mybundle.x, row.mybundle.y)\n\n :param name: name of the bundle.\n :param \\*exprs: columns or SQL expressions comprising the bundle.\n :param single_entity=False: if True, rows for this :class:`.Bundle`\n can be returned as a "single entity" outside of any enclosing tuple\n in the same manner as a mapped entity.\n\n ' self.name = self._label = name self.exprs = exprs self.c = self.columns = ColumnCollection() self.columns.update(((getattr(col, 'key', col._label), col) for col in exprs)) self.single_entity = kw.pop('single_entity', self.single_entity)
4,016,396,337,611,967,000
Construct a new :class:`.Bundle`. e.g.:: bn = Bundle("mybundle", MyClass.x, MyClass.y) for row in session.query(bn).filter( bn.c.x == 5).filter(bn.c.y == 4): print(row.mybundle.x, row.mybundle.y) :param name: name of the bundle. :param \*exprs: columns or SQL expressions comprising the bundle. :param single_entity=False: if True, rows for this :class:`.Bundle` can be returned as a "single entity" outside of any enclosing tuple in the same manner as a mapped entity.
lib/sqlalchemy/orm/query.py
__init__
slafs/sqlalchemy
python
def __init__(self, name, *exprs, **kw): 'Construct a new :class:`.Bundle`.\n\n e.g.::\n\n bn = Bundle("mybundle", MyClass.x, MyClass.y)\n\n for row in session.query(bn).filter(\n bn.c.x == 5).filter(bn.c.y == 4):\n print(row.mybundle.x, row.mybundle.y)\n\n :param name: name of the bundle.\n :param \\*exprs: columns or SQL expressions comprising the bundle.\n :param single_entity=False: if True, rows for this :class:`.Bundle`\n can be returned as a "single entity" outside of any enclosing tuple\n in the same manner as a mapped entity.\n\n ' self.name = self._label = name self.exprs = exprs self.c = self.columns = ColumnCollection() self.columns.update(((getattr(col, 'key', col._label), col) for col in exprs)) self.single_entity = kw.pop('single_entity', self.single_entity)
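A small, hedged variation on the docstring's example showing the single_entity flag; the User mapping and session from the earlier sketch are assumed:

from sqlalchemy.orm import Bundle

# With single_entity=True the bundle value is returned directly,
# not wrapped in a one-element result tuple.
bn = Bundle('b', User.name, User.age, single_entity=True)
for row in session.query(bn):
    print(row.name, row.age)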
def label(self, name): 'Provide a copy of this :class:`.Bundle` passing a new label.' cloned = self._clone() cloned.name = name return cloned
-4,030,217,709,794,036,000
Provide a copy of this :class:`.Bundle` passing a new label.
lib/sqlalchemy/orm/query.py
label
slafs/sqlalchemy
python
def label(self, name): cloned = self._clone() cloned.name = name return cloned
def create_row_processor(self, query, procs, labels): 'Produce the "row processing" function for this :class:`.Bundle`.\n\n May be overridden by subclasses.\n\n .. seealso::\n\n :ref:`bundles` - includes an example of subclassing.\n\n ' keyed_tuple = util.lightweight_named_tuple('result', labels) def proc(row): return keyed_tuple([proc(row) for proc in procs]) return proc
5,742,511,179,740,265,000
Produce the "row processing" function for this :class:`.Bundle`. May be overridden by subclasses. .. seealso:: :ref:`bundles` - includes an example of subclassing.
lib/sqlalchemy/orm/query.py
create_row_processor
slafs/sqlalchemy
python
def create_row_processor(self, query, procs, labels): 'Produce the "row processing" function for this :class:`.Bundle`.\n\n May be overridden by subclasses.\n\n .. seealso::\n\n :ref:`bundles` - includes an example of subclassing.\n\n ' keyed_tuple = util.lightweight_named_tuple('result', labels) def proc(row): return keyed_tuple([proc(row) for proc in procs]) return proc
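The hook above is what the documented "bundles" recipe customizes; a sketch of a subclass that returns plain dicts instead of named tuples (User is again the assumed mapping):

from sqlalchemy.orm import Bundle

class DictBundle(Bundle):
    def create_row_processor(self, query, procs, labels):
        # Same shape as the default hook, but zip into a plain dict
        # rather than a lightweight named tuple.
        def proc(row):
            return dict(zip(labels, (p(row) for p in procs)))
        return proc

bn = DictBundle('b', User.name, User.age)
for row in session.query(bn):
    print(row.b['name'], row.b['age'])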
def __init__(self, alias): 'Return a :class:`.MapperOption` that will indicate to the :class:`.Query`\n that the main table has been aliased.\n\n This is a seldom-used option to suit the\n very rare case that :func:`.contains_eager`\n is being used in conjunction with a user-defined SELECT\n statement that aliases the parent table. E.g.::\n\n # define an aliased UNION called \'ulist\'\n ulist = users.select(users.c.user_id==7).\\\n union(users.select(users.c.user_id>7)).\\\n alias(\'ulist\')\n\n # add on an eager load of "addresses"\n statement = ulist.outerjoin(addresses).\\\n select().apply_labels()\n\n # create query, indicating "ulist" will be an\n # alias for the main table, "addresses"\n # property should be eager loaded\n query = session.query(User).options(\n contains_alias(ulist),\n contains_eager(User.addresses))\n\n # then get results via the statement\n results = query.from_statement(statement).all()\n\n :param alias: is the string name of an alias, or a\n :class:`~.sql.expression.Alias` object representing\n the alias.\n\n ' self.alias = alias
-3,389,426,055,958,328,300
Return a :class:`.MapperOption` that will indicate to the :class:`.Query` that the main table has been aliased. This is a seldom-used option to suit the very rare case that :func:`.contains_eager` is being used in conjunction with a user-defined SELECT statement that aliases the parent table. E.g.:: # define an aliased UNION called 'ulist' ulist = users.select(users.c.user_id==7).\ union(users.select(users.c.user_id>7)).\ alias('ulist') # add on an eager load of "addresses" statement = ulist.outerjoin(addresses).\ select().apply_labels() # create query, indicating "ulist" will be an # alias for the main table, "addresses" # property should be eager loaded query = session.query(User).options( contains_alias(ulist), contains_eager(User.addresses)) # then get results via the statement results = query.from_statement(statement).all() :param alias: is the string name of an alias, or a :class:`~.sql.expression.Alias` object representing the alias.
lib/sqlalchemy/orm/query.py
__init__
slafs/sqlalchemy
python
def __init__(self, alias): 'Return a :class:`.MapperOption` that will indicate to the :class:`.Query`\n that the main table has been aliased.\n\n This is a seldom-used option to suit the\n very rare case that :func:`.contains_eager`\n is being used in conjunction with a user-defined SELECT\n statement that aliases the parent table. E.g.::\n\n # define an aliased UNION called \'ulist\'\n ulist = users.select(users.c.user_id==7).\\\n union(users.select(users.c.user_id>7)).\\\n alias(\'ulist\')\n\n # add on an eager load of "addresses"\n statement = ulist.outerjoin(addresses).\\\n select().apply_labels()\n\n # create query, indicating "ulist" will be an\n # alias for the main table, "addresses"\n # property should be eager loaded\n query = session.query(User).options(\n contains_alias(ulist),\n contains_eager(User.addresses))\n\n # then get results via the statement\n results = query.from_statement(statement).all()\n\n :param alias: is the string name of an alias, or a\n :class:`~.sql.expression.Alias` object representing\n the alias.\n\n ' self.alias = alias
def piter(self, storage=None, dynamic=False): 'Iterate over time series components in parallel.\n\n This allows you to iterate over a time series while dispatching\n individual components of that time series to different processors or\n processor groups. If the parallelism strategy was set to be\n multi-processor (by "parallel = N" where N is an integer when the\n DatasetSeries was created) this will issue each dataset to an\n N-processor group. For instance, this would allow you to start a 1024\n processor job, loading up 100 datasets in a time series and creating 8\n processor groups of 128 processors each, each of which would be\n assigned a different dataset. This could be accomplished as shown in\n the examples below. The *storage* option is as seen in\n :func:`~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_objects`\n which is a mechanism for storing results of analysis on an individual\n dataset and then combining the results at the end, so that the entire\n set of processors have access to those results.\n\n Note that supplying a *store* changes the iteration mechanism; see\n below.\n\n Parameters\n ----------\n storage : dict\n This is a dictionary, which will be filled with results during the\n course of the iteration. The keys will be the dataset\n indices and the values will be whatever is assigned to the *result*\n attribute on the storage during iteration.\n dynamic : boolean\n This governs whether or not dynamic load balancing will be\n enabled. This requires one dedicated processor; if this\n is enabled with a set of 128 processors available, only\n 127 will be available to iterate over objects as one will\n be load balancing the rest.\n\n\n Examples\n --------\n Here is an example of iteration when the results do not need to be\n stored. One processor will be assigned to each dataset.\n\n >>> ts = DatasetSeries("DD*/DD*.index")\n >>> for ds in ts.piter():\n ... SlicePlot(ds, "x", "Density").save()\n ...\n \n This demonstrates how one might store results:\n\n >>> def print_time(ds):\n ... print ds.current_time\n ...\n >>> ts = DatasetSeries("DD*/DD*.index",\n ... setup_function = print_time )\n ...\n >>> my_storage = {}\n >>> for sto, ds in ts.piter(storage=my_storage):\n ... v, c = ds.find_max("density")\n ... sto.result = (v, c)\n ...\n >>> for i, (v, c) in sorted(my_storage.items()):\n ... print "% 4i %0.3e" % (i, v)\n ...\n\n This shows how to dispatch 4 processors to each dataset:\n\n >>> ts = DatasetSeries("DD*/DD*.index",\n ... parallel = 4)\n >>> for ds in ts.piter():\n ... ProjectionPlot(ds, "x", "Density").save()\n ...\n\n ' if (self.parallel is False): njobs = 1 elif (dynamic is False): if (self.parallel is True): njobs = (- 1) else: njobs = self.parallel else: my_communicator = communication_system.communicators[(- 1)] nsize = my_communicator.size if (nsize == 1): self.parallel = False dynamic = False njobs = 1 else: njobs = (nsize - 1) for output in parallel_objects(self._pre_outputs, njobs=njobs, storage=storage, dynamic=dynamic): if (storage is not None): (sto, output) = output if isinstance(output, string_types): ds = self._load(output, **self.kwargs) self._setup_function(ds) else: ds = output if (storage is not None): next_ret = (sto, ds) else: next_ret = ds (yield next_ret)
-6,974,026,987,528,458,000
Iterate over time series components in parallel.

This allows you to iterate over a time series while dispatching individual components of that time series to different processors or processor groups. If the parallelism strategy was set to be multi-processor (by "parallel = N" where N is an integer when the DatasetSeries was created) this will issue each dataset to an N-processor group. For instance, this would allow you to start a 1024 processor job, loading up 100 datasets in a time series and creating 8 processor groups of 128 processors each, each of which would be assigned a different dataset. This could be accomplished as shown in the examples below. The *storage* option is as seen in :func:`~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_objects` which is a mechanism for storing results of analysis on an individual dataset and then combining the results at the end, so that the entire set of processors has access to those results.

Note that supplying *storage* changes the iteration mechanism; see below.

Parameters
----------
storage : dict
    This is a dictionary, which will be filled with results during the course of the iteration. The keys will be the dataset indices and the values will be whatever is assigned to the *result* attribute on the storage during iteration.
dynamic : boolean
    This governs whether or not dynamic load balancing will be enabled. This requires one dedicated processor; if this is enabled with a set of 128 processors available, only 127 will be available to iterate over objects as one will be load balancing the rest.

Examples
--------
Here is an example of iteration when the results do not need to be stored. One processor will be assigned to each dataset.

>>> ts = DatasetSeries("DD*/DD*.index")
>>> for ds in ts.piter():
...     SlicePlot(ds, "x", "Density").save()
...

This demonstrates how one might store results:

>>> def print_time(ds):
...     print ds.current_time
...
>>> ts = DatasetSeries("DD*/DD*.index",
...                    setup_function = print_time )
...
>>> my_storage = {}
>>> for sto, ds in ts.piter(storage=my_storage):
...     v, c = ds.find_max("density")
...     sto.result = (v, c)
...
>>> for i, (v, c) in sorted(my_storage.items()):
...     print "% 4i %0.3e" % (i, v)
...

This shows how to dispatch 4 processors to each dataset:

>>> ts = DatasetSeries("DD*/DD*.index",
...                    parallel = 4)
>>> for ds in ts.piter():
...     ProjectionPlot(ds, "x", "Density").save()
...
yt/data_objects/time_series.py
piter
edilberto100/yt
python
def piter(self, storage=None, dynamic=False): 'Iterate over time series components in parallel.\n\n This allows you to iterate over a time series while dispatching\n individual components of that time series to different processors or\n processor groups. If the parallelism strategy was set to be\n multi-processor (by "parallel = N" where N is an integer when the\n DatasetSeries was created) this will issue each dataset to an\n N-processor group. For instance, this would allow you to start a 1024\n processor job, loading up 100 datasets in a time series and creating 8\n processor groups of 128 processors each, each of which would be\n assigned a different dataset. This could be accomplished as shown in\n the examples below. The *storage* option is as seen in\n :func:`~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_objects`\n which is a mechanism for storing results of analysis on an individual\n dataset and then combining the results at the end, so that the entire\n set of processors have access to those results.\n\n Note that supplying a *store* changes the iteration mechanism; see\n below.\n\n Parameters\n ----------\n storage : dict\n This is a dictionary, which will be filled with results during the\n course of the iteration. The keys will be the dataset\n indices and the values will be whatever is assigned to the *result*\n attribute on the storage during iteration.\n dynamic : boolean\n This governs whether or not dynamic load balancing will be\n enabled. This requires one dedicated processor; if this\n is enabled with a set of 128 processors available, only\n 127 will be available to iterate over objects as one will\n be load balancing the rest.\n\n\n Examples\n --------\n Here is an example of iteration when the results do not need to be\n stored. One processor will be assigned to each dataset.\n\n >>> ts = DatasetSeries("DD*/DD*.index")\n >>> for ds in ts.piter():\n ... SlicePlot(ds, "x", "Density").save()\n ...\n \n This demonstrates how one might store results:\n\n >>> def print_time(ds):\n ... print ds.current_time\n ...\n >>> ts = DatasetSeries("DD*/DD*.index",\n ... setup_function = print_time )\n ...\n >>> my_storage = {}\n >>> for sto, ds in ts.piter(storage=my_storage):\n ... v, c = ds.find_max("density")\n ... sto.result = (v, c)\n ...\n >>> for i, (v, c) in sorted(my_storage.items()):\n ... print "% 4i %0.3e" % (i, v)\n ...\n\n This shows how to dispatch 4 processors to each dataset:\n\n >>> ts = DatasetSeries("DD*/DD*.index",\n ... parallel = 4)\n >>> for ds in ts.piter():\n ... ProjectionPlot(ds, "x", "Density").save()\n ...\n\n ' if (self.parallel is False): njobs = 1 elif (dynamic is False): if (self.parallel is True): njobs = (- 1) else: njobs = self.parallel else: my_communicator = communication_system.communicators[(- 1)] nsize = my_communicator.size if (nsize == 1): self.parallel = False dynamic = False njobs = 1 else: njobs = (nsize - 1) for output in parallel_objects(self._pre_outputs, njobs=njobs, storage=storage, dynamic=dynamic): if (storage is not None): (sto, output) = output if isinstance(output, string_types): ds = self._load(output, **self.kwargs) self._setup_function(ds) else: ds = output if (storage is not None): next_ret = (sto, ds) else: next_ret = ds (yield next_ret)
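A hedged usage sketch combining the storage and dynamic options documented above; the glob pattern is a placeholder:

import yt

ts = yt.DatasetSeries("DD*/DD*.index")  # placeholder glob pattern
my_storage = {}
# dynamic=True dedicates one rank to load balancing, so N MPI ranks
# leave N - 1 workers iterating over the datasets.
for sto, ds in ts.piter(storage=my_storage, dynamic=True):
    sto.result = float(ds.current_time)
# After the loop every rank sees the combined storage dict.
for i, t in sorted(my_storage.items()):
    print(i, t)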
@classmethod def from_filenames(cls, filenames, parallel=True, setup_function=None, **kwargs): 'Create a time series from either a filename pattern or a list of\n filenames.\n\n This method provides an easy way to create a\n :class:`~yt.data_objects.time_series.DatasetSeries`, given a set of\n filenames or a pattern that matches them. Additionally, it can set the\n parallelism strategy.\n\n Parameters\n ----------\n filenames : list or pattern\n This can either be a list of filenames (such as ["DD0001/DD0001",\n "DD0002/DD0002"]) or a pattern to match, such as\n "DD*/DD*.index"). If it\'s the former, they will be loaded in\n order. The latter will be identified with the glob module and then\n sorted.\n parallel : True, False or int\n This parameter governs the behavior when .piter() is called on the\n resultant DatasetSeries object. If this is set to False, the time\n series will not iterate in parallel when .piter() is called. If\n this is set to either True or an integer, it will be iterated with\n 1 or that integer number of processors assigned to each parameter\n file provided to the loop.\n setup_function : callable, accepts a ds\n This function will be called whenever a dataset is loaded.\n\n Examples\n --------\n\n >>> def print_time(ds):\n ... print ds.current_time\n ...\n >>> ts = DatasetSeries.from_filenames(\n ... "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0[0-6][0-9]0",\n ... setup_function = print_time)\n ...\n >>> for ds in ts:\n ... SlicePlot(ds, "x", "Density").save()\n\n ' if isinstance(filenames, string_types): filenames = get_filenames_from_glob_pattern(filenames) for fn in filenames: if (not isinstance(fn, string_types)): raise YTOutputNotIdentified('DataSeries accepts a list of strings, but received {0}'.format(fn)) obj = cls(filenames[:], parallel=parallel, setup_function=setup_function, **kwargs) return obj
-6,576,160,609,459,206,000
Create a time series from either a filename pattern or a list of filenames.

This method provides an easy way to create a :class:`~yt.data_objects.time_series.DatasetSeries`, given a set of filenames or a pattern that matches them. Additionally, it can set the parallelism strategy.

Parameters
----------
filenames : list or pattern
    This can either be a list of filenames (such as ["DD0001/DD0001", "DD0002/DD0002"]) or a pattern to match, such as "DD*/DD*.index". If it's the former, they will be loaded in order. The latter will be identified with the glob module and then sorted.
parallel : True, False or int
    This parameter governs the behavior when .piter() is called on the resultant DatasetSeries object. If this is set to False, the time series will not iterate in parallel when .piter() is called. If this is set to either True or an integer, it will be iterated with 1 or that integer number of processors assigned to each parameter file provided to the loop.
setup_function : callable, accepts a ds
    This function will be called whenever a dataset is loaded.

Examples
--------

>>> def print_time(ds):
...     print ds.current_time
...
>>> ts = DatasetSeries.from_filenames(
...     "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0[0-6][0-9]0",
...     setup_function = print_time)
...
>>> for ds in ts:
...     SlicePlot(ds, "x", "Density").save()
yt/data_objects/time_series.py
from_filenames
edilberto100/yt
python
@classmethod def from_filenames(cls, filenames, parallel=True, setup_function=None, **kwargs): 'Create a time series from either a filename pattern or a list of\n filenames.\n\n This method provides an easy way to create a\n :class:`~yt.data_objects.time_series.DatasetSeries`, given a set of\n filenames or a pattern that matches them. Additionally, it can set the\n parallelism strategy.\n\n Parameters\n ----------\n filenames : list or pattern\n This can either be a list of filenames (such as ["DD0001/DD0001",\n "DD0002/DD0002"]) or a pattern to match, such as\n "DD*/DD*.index"). If it\'s the former, they will be loaded in\n order. The latter will be identified with the glob module and then\n sorted.\n parallel : True, False or int\n This parameter governs the behavior when .piter() is called on the\n resultant DatasetSeries object. If this is set to False, the time\n series will not iterate in parallel when .piter() is called. If\n this is set to either True or an integer, it will be iterated with\n 1 or that integer number of processors assigned to each parameter\n file provided to the loop.\n setup_function : callable, accepts a ds\n This function will be called whenever a dataset is loaded.\n\n Examples\n --------\n\n >>> def print_time(ds):\n ... print ds.current_time\n ...\n >>> ts = DatasetSeries.from_filenames(\n ... "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0[0-6][0-9]0",\n ... setup_function = print_time)\n ...\n >>> for ds in ts:\n ... SlicePlot(ds, "x", "Density").save()\n\n ' if isinstance(filenames, string_types): filenames = get_filenames_from_glob_pattern(filenames) for fn in filenames: if (not isinstance(fn, string_types)): raise YTOutputNotIdentified('DataSeries accepts a list of strings, but received {0}'.format(fn)) obj = cls(filenames[:], parallel=parallel, setup_function=setup_function, **kwargs) return obj
def particle_trajectories(self, indices, fields=None, suppress_logging=False, ptype=None): 'Create a collection of particle trajectories in time over a series of\n datasets.\n\n Parameters\n ----------\n indices : array_like\n An integer array of particle indices whose trajectories we\n want to track. If they are not sorted they will be sorted.\n fields : list of strings, optional\n A set of fields that is retrieved when the trajectory\n collection is instantiated. Default: None (will default\n to the fields \'particle_position_x\', \'particle_position_y\',\n \'particle_position_z\')\n suppress_logging : boolean\n Suppress yt\'s logging when iterating over the simulation time\n series. Default: False\n ptype : str, optional\n Only use this particle type. Default: None, which uses all particle type.\n\n Examples\n --------\n >>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")\n >>> my_fns.sort()\n >>> fields = ["particle_position_x", "particle_position_y",\n >>> "particle_position_z", "particle_velocity_x",\n >>> "particle_velocity_y", "particle_velocity_z"]\n >>> ds = load(my_fns[0])\n >>> init_sphere = ds.sphere(ds.domain_center, (.5, "unitary"))\n >>> indices = init_sphere["particle_index"].astype("int")\n >>> ts = DatasetSeries(my_fns)\n >>> trajs = ts.particle_trajectories(indices, fields=fields)\n >>> for t in trajs :\n >>> print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()\n\n Note\n ----\n This function will fail if there are duplicate particle ids or if some of the particle\n disappear.\n ' return ParticleTrajectories(self, indices, fields=fields, suppress_logging=suppress_logging, ptype=ptype)
4,003,370,931,211,506,000
Create a collection of particle trajectories in time over a series of datasets.

Parameters
----------
indices : array_like
    An integer array of particle indices whose trajectories we want to track. If they are not sorted they will be sorted.
fields : list of strings, optional
    A set of fields that is retrieved when the trajectory collection is instantiated. Default: None (will default to the fields 'particle_position_x', 'particle_position_y', 'particle_position_z')
suppress_logging : boolean
    Suppress yt's logging when iterating over the simulation time series. Default: False
ptype : str, optional
    Only use this particle type. Default: None, which uses all particle types.

Examples
--------
>>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")
>>> my_fns.sort()
>>> fields = ["particle_position_x", "particle_position_y",
>>>           "particle_position_z", "particle_velocity_x",
>>>           "particle_velocity_y", "particle_velocity_z"]
>>> ds = load(my_fns[0])
>>> init_sphere = ds.sphere(ds.domain_center, (.5, "unitary"))
>>> indices = init_sphere["particle_index"].astype("int")
>>> ts = DatasetSeries(my_fns)
>>> trajs = ts.particle_trajectories(indices, fields=fields)
>>> for t in trajs :
>>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()

Note
----
This function will fail if there are duplicate particle ids or if some of the particles disappear.
yt/data_objects/time_series.py
particle_trajectories
edilberto100/yt
python
def particle_trajectories(self, indices, fields=None, suppress_logging=False, ptype=None): 'Create a collection of particle trajectories in time over a series of\n datasets.\n\n Parameters\n ----------\n indices : array_like\n An integer array of particle indices whose trajectories we\n want to track. If they are not sorted they will be sorted.\n fields : list of strings, optional\n A set of fields that is retrieved when the trajectory\n collection is instantiated. Default: None (will default\n to the fields \'particle_position_x\', \'particle_position_y\',\n \'particle_position_z\')\n suppress_logging : boolean\n Suppress yt\'s logging when iterating over the simulation time\n series. Default: False\n ptype : str, optional\n Only use this particle type. Default: None, which uses all particle type.\n\n Examples\n --------\n >>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")\n >>> my_fns.sort()\n >>> fields = ["particle_position_x", "particle_position_y",\n >>> "particle_position_z", "particle_velocity_x",\n >>> "particle_velocity_y", "particle_velocity_z"]\n >>> ds = load(my_fns[0])\n >>> init_sphere = ds.sphere(ds.domain_center, (.5, "unitary"))\n >>> indices = init_sphere["particle_index"].astype("int")\n >>> ts = DatasetSeries(my_fns)\n >>> trajs = ts.particle_trajectories(indices, fields=fields)\n >>> for t in trajs :\n >>> print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()\n\n Note\n ----\n This function will fail if there are duplicate particle ids or if some of the particle\n disappear.\n ' return ParticleTrajectories(self, indices, fields=fields, suppress_logging=suppress_logging, ptype=ptype)
def __init__(self, parameter_filename, find_outputs=False): '\n Base class for generating simulation time series types.\n Principally consists of a *parameter_filename*.\n ' if (not os.path.exists(parameter_filename)): raise IOError(parameter_filename) self.parameter_filename = parameter_filename self.basename = os.path.basename(parameter_filename) self.directory = os.path.dirname(parameter_filename) self.parameters = {} self.key_parameters = [] self._set_parameter_defaults() self._parse_parameter_file() self._set_units() self._calculate_simulation_bounds() self._get_all_outputs(find_outputs=find_outputs) self.print_key_parameters()
-1,257,832,470,454,142,500
Base class for generating simulation time series types. Principally consists of a *parameter_filename*.
yt/data_objects/time_series.py
__init__
edilberto100/yt
python
def __init__(self, parameter_filename, find_outputs=False): '\n Base class for generating simulation time series types.\n Principally consists of a *parameter_filename*.\n ' if (not os.path.exists(parameter_filename)): raise IOError(parameter_filename) self.parameter_filename = parameter_filename self.basename = os.path.basename(parameter_filename) self.directory = os.path.dirname(parameter_filename) self.parameters = {} self.key_parameters = [] self._set_parameter_defaults() self._parse_parameter_file() self._set_units() self._calculate_simulation_bounds() self._get_all_outputs(find_outputs=find_outputs) self.print_key_parameters()
@parallel_root_only def print_key_parameters(self): '\n Print out some key parameters for the simulation.\n ' if (self.simulation_type == 'grid'): for a in ['domain_dimensions', 'domain_left_edge', 'domain_right_edge']: self._print_attr(a) for a in ['initial_time', 'final_time', 'cosmological_simulation']: self._print_attr(a) if getattr(self, 'cosmological_simulation', False): for a in ['box_size', 'omega_matter', 'omega_lambda', 'omega_radiation', 'hubble_constant', 'initial_redshift', 'final_redshift']: self._print_attr(a) for a in self.key_parameters: self._print_attr(a) mylog.info(('Total datasets: %d.' % len(self.all_outputs)))
-3,723,327,881,550,641,000
Print out some key parameters for the simulation.
yt/data_objects/time_series.py
print_key_parameters
edilberto100/yt
python
@parallel_root_only def print_key_parameters(self): '\n \n ' if (self.simulation_type == 'grid'): for a in ['domain_dimensions', 'domain_left_edge', 'domain_right_edge']: self._print_attr(a) for a in ['initial_time', 'final_time', 'cosmological_simulation']: self._print_attr(a) if getattr(self, 'cosmological_simulation', False): for a in ['box_size', 'omega_matter', 'omega_lambda', 'omega_radiation', 'hubble_constant', 'initial_redshift', 'final_redshift']: self._print_attr(a) for a in self.key_parameters: self._print_attr(a) mylog.info(('Total datasets: %d.' % len(self.all_outputs)))
def _print_attr(self, a): '\n Print the attribute or warn about it missing.\n ' if (not hasattr(self, a)): mylog.error('Missing %s in dataset definition!', a) return v = getattr(self, a) mylog.info('Parameters: %-25s = %s', a, v)
7,750,444,500,331,655,000
Print the attribute or warn about it missing.
yt/data_objects/time_series.py
_print_attr
edilberto100/yt
python
def _print_attr(self, a): '\n \n ' if (not hasattr(self, a)): mylog.error('Missing %s in dataset definition!', a) return v = getattr(self, a) mylog.info('Parameters: %-25s = %s', a, v)
def _get_outputs_by_key(self, key, values, tolerance=None, outputs=None): "\n Get datasets at or near to given values.\n\n Parameters\n ----------\n key: str\n The key by which to retrieve outputs, usually 'time' or\n 'redshift'.\n values: array_like\n A list of values, given as floats.\n tolerance : float\n If not None, do not return a dataset unless the value is\n within the tolerance value. If None, simply return the\n nearest dataset.\n Default: None.\n outputs : list\n The list of outputs from which to choose. If None,\n self.all_outputs is used.\n Default: None.\n\n Examples\n --------\n >>> datasets = es.get_outputs_by_key('redshift', [0, 1, 2], tolerance=0.1)\n\n " if (not isinstance(values, YTArray)): if (isinstance(values, tuple) and (len(values) == 2)): values = self.arr(*values) else: values = self.arr(values) values = values.in_base() if (outputs is None): outputs = self.all_outputs my_outputs = [] if (not outputs): return my_outputs for value in values: outputs.sort(key=(lambda obj: np.abs((value - obj[key])))) if (((tolerance is None) or (np.abs((value - outputs[0][key])) <= tolerance)) and (outputs[0] not in my_outputs)): my_outputs.append(outputs[0]) else: mylog.error('No dataset added for %s = %f.', key, value) outputs.sort(key=(lambda obj: obj['time'])) return my_outputs
1,400,193,438,704,522,500
Get datasets at or near given values.

Parameters
----------
key : str
    The key by which to retrieve outputs, usually 'time' or 'redshift'.
values : array_like
    A list of values, given as floats.
tolerance : float
    If not None, do not return a dataset unless the value is within the tolerance value. If None, simply return the nearest dataset. Default: None.
outputs : list
    The list of outputs from which to choose. If None, self.all_outputs is used. Default: None.

Examples
--------
>>> datasets = es.get_outputs_by_key('redshift', [0, 1, 2], tolerance=0.1)
yt/data_objects/time_series.py
_get_outputs_by_key
edilberto100/yt
python
def _get_outputs_by_key(self, key, values, tolerance=None, outputs=None): "\n Get datasets at or near to given values.\n\n Parameters\n ----------\n key: str\n The key by which to retrieve outputs, usually 'time' or\n 'redshift'.\n values: array_like\n A list of values, given as floats.\n tolerance : float\n If not None, do not return a dataset unless the value is\n within the tolerance value. If None, simply return the\n nearest dataset.\n Default: None.\n outputs : list\n The list of outputs from which to choose. If None,\n self.all_outputs is used.\n Default: None.\n\n Examples\n --------\n >>> datasets = es.get_outputs_by_key('redshift', [0, 1, 2], tolerance=0.1)\n\n " if (not isinstance(values, YTArray)): if (isinstance(values, tuple) and (len(values) == 2)): values = self.arr(*values) else: values = self.arr(values) values = values.in_base() if (outputs is None): outputs = self.all_outputs my_outputs = [] if (not outputs): return my_outputs for value in values: outputs.sort(key=(lambda obj: np.abs((value - obj[key])))) if (((tolerance is None) or (np.abs((value - outputs[0][key])) <= tolerance)) and (outputs[0] not in my_outputs)): my_outputs.append(outputs[0]) else: mylog.error('No dataset added for %s = %f.', key, value) outputs.sort(key=(lambda obj: obj['time'])) return my_outputs
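The nearest-match-with-tolerance selection above can be restated as a standalone function; the toy outputs list is invented, but the logic mirrors the method:

import numpy as np

def nearest_outputs(outputs, key, values, tolerance=None):
    # outputs: list of dicts carrying float entries for `key` and 'time'.
    chosen = []
    for value in values:
        outputs.sort(key=lambda obj: np.abs(value - obj[key]))
        best = outputs[0]
        if ((tolerance is None or np.abs(value - best[key]) <= tolerance)
                and best not in chosen):
            chosen.append(best)
    outputs.sort(key=lambda obj: obj['time'])  # restore time ordering
    return chosen

outs = [{'redshift': 2.1, 'time': 1.0}, {'redshift': 0.05, 'time': 5.0}]
print(nearest_outputs(outs, 'redshift', [0.0, 3.0], tolerance=0.1))
# -> only the z=0.05 output; nothing lies within 0.1 of z=3.0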
def doJob(self, http_res, backend, dbms, parent=None): 'Perform the module job: generate payload settings for the given HTTP response.' self.payloads['revisable'] = ('True' if (self.doReturn is False) else 'False') self.settings = self.generate_payloads(http_res, parent=parent) return self.settings
2,761,890,933,076,850,700
Perform the module job: generate payload settings for the given HTTP response.
core/attack/mod_unfilter.py
doJob
qazbnm456/VWGen
python
def doJob(self, http_res, backend, dbms, parent=None): self.payloads['revisable'] = ('True' if (self.doReturn is False) else 'False') self.settings = self.generate_payloads(http_res, parent=parent) return self.settings
def to_haystack(unit): '\n Some parsing tweaks to fit pint units / handling of edge cases.\n ' global HAYSTACK_CONVERSION global PINT_CONVERSION if ((unit == u'per_minute') or (unit == u'/min') or (unit == u'per_second') or (unit == u'/s') or (unit == u'per_hour') or (unit == u'/h') or (unit == None)): return u'' for (pint_value, haystack_value) in PINT_CONVERSION: unit = unit.replace(pint_value, haystack_value) for (haystack_value, pint_value) in HAYSTACK_CONVERSION: if (pint_value == u''): continue unit = unit.replace(pint_value, haystack_value) return unit
-2,415,941,517,977,249,000
Some parsing tweaks to fit pint units and handle edge cases.
hszinc/pintutil.py
to_haystack
clarsen/hszinc
python
def to_haystack(unit): '\n \n ' global HAYSTACK_CONVERSION global PINT_CONVERSION if ((unit == u'per_minute') or (unit == u'/min') or (unit == u'per_second') or (unit == u'/s') or (unit == u'per_hour') or (unit == u'/h') or (unit == None)): return u'' for (pint_value, haystack_value) in PINT_CONVERSION: unit = unit.replace(pint_value, haystack_value) for (haystack_value, pint_value) in HAYSTACK_CONVERSION: if (pint_value == u''): continue unit = unit.replace(pint_value, haystack_value) return unit
def to_pint(unit): '\n Some parsing tweaks to fit pint units / handling of edge cases.\n ' global HAYSTACK_CONVERSION if ((unit == u'per_minute') or (unit == u'/min') or (unit == u'per_second') or (unit == u'/s') or (unit == u'per_hour') or (unit == u'/h') or (unit == None)): return '' for (haystack_value, pint_value) in HAYSTACK_CONVERSION: unit = unit.replace(haystack_value, pint_value) return unit
3,679,479,176,463,237,000
Some parsing tweaks to fit pint units and handle edge cases.
hszinc/pintutil.py
to_pint
clarsen/hszinc
python
def to_pint(unit): '\n \n ' global HAYSTACK_CONVERSION if ((unit == u'per_minute') or (unit == u'/min') or (unit == u'per_second') or (unit == u'/s') or (unit == u'per_hour') or (unit == u'/h') or (unit == None)): return '' for (haystack_value, pint_value) in HAYSTACK_CONVERSION: unit = unit.replace(haystack_value, pint_value) return unit
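A few directly grounded spot checks of the two helpers above; the rate pseudo-units and None are the explicitly special-cased inputs:

from hszinc.pintutil import to_haystack, to_pint

# Rate pseudo-units and None are special-cased to the empty string.
assert to_pint(u'per_minute') == ''
assert to_pint(u'/h') == ''
assert to_haystack(u'/s') == u''
assert to_haystack(None) == u''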
def define_haystack_units(): '\n Missing units found in project-haystack\n Added to the registry\n ' ureg = UnitRegistry(on_redefinition='ignore') ureg.define(u'% = [] = percent') ureg.define(u'pixel = [] = px = dot = picture_element = pel') ureg.define(u'decibel = [] = dB') ureg.define(u'ppu = [] = parts_per_unit') ureg.define(u'ppm = [] = parts_per_million') ureg.define(u'ppb = [] = parts_per_billion') ureg.define(u'%RH = [] = percent_relative_humidity = percentRH') ureg.define(u'cubic_feet = ft ** 3 = cu_ft') ureg.define(u'cfm = cu_ft * minute = liter_per_second / 0.4719475') ureg.define(u'cfh = cu_ft * hour') ureg.define(u'cfs = cu_ft * second') ureg.define(u'VAR = volt * ampere') ureg.define(u'kVAR = 1000 * volt * ampere') ureg.define(u'MVAR = 1000000 * volt * ampere') ureg.define(u'inH2O = in_H2O') ureg.define(u'dry_air = []') ureg.define(u'gas = []') ureg.define(u'energy_efficiency_ratio = [] = EER') ureg.define(u'coefficient_of_performance = [] = COP') ureg.define(u'data_center_infrastructure_efficiency = [] = DCIE') ureg.define(u'power_usage_effectiveness = [] = PUE') ureg.define(u'formazin_nephelometric_unit = [] = fnu') ureg.define(u'nephelometric_turbidity_units = [] = ntu') ureg.define(u'power_factor = [] = PF') ureg.define(u'degree_day_celsius = [] = degdaysC') ureg.define(u'degree_day_farenheit = degree_day_celsius * 9 / 5 = degdaysF') ureg.define(u'footcandle = lumen / sq_ft = ftcd') ureg.define(u'Nm = newton * meter') ureg.define(u'%obsc = [] = percent_obscuration = percentobsc') ureg.define(u'cycle = []') ureg.define(u'cph = cycle / hour') ureg.define(u'cpm = cycle / minute') ureg.define(u'cps = cycle / second') ureg.define(u'hecto_cubic_foot = 100 * cubic_foot') ureg.define(u'tenths_second = second / 10') ureg.define(u'hundredths_second = second / 100') ureg.define(u'australian_dollar = [] = AUD') ureg.define(u'british_pound = [] = GBP = £') ureg.define(u'canadian_dollar = [] = CAD') ureg.define(u'chinese_yuan = [] = CNY = 元') ureg.define(u'emerati_dirham = [] = AED') ureg.define(u'euro = [] = EUR = €') ureg.define(u'indian_rupee = [] = INR = ₹') ureg.define(u'japanese_yen = [] = JPY = ¥') ureg.define(u'russian_ruble = [] = RUB = руб') ureg.define(u'south_korean_won = [] = KRW = ₩') ureg.define(u'swedish_krona = [] = SEK = kr') ureg.define(u'swiss_franc = [] = CHF = Fr') ureg.define(u'taiwan_dollar = [] = TWD') ureg.define(u'us_dollar = [] = USD = $') ureg.define(u'new_israeli_shekel = [] = NIS') return ureg
7,882,874,107,201,615,000
Missing units found in project-haystack Added to the registry
hszinc/pintutil.py
define_haystack_units
clarsen/hszinc
python
def define_haystack_units(): '\n Missing units found in project-haystack\n Added to the registry\n ' ureg = UnitRegistry(on_redefinition='ignore') ureg.define(u'% = [] = percent') ureg.define(u'pixel = [] = px = dot = picture_element = pel') ureg.define(u'decibel = [] = dB') ureg.define(u'ppu = [] = parts_per_unit') ureg.define(u'ppm = [] = parts_per_million') ureg.define(u'ppb = [] = parts_per_billion') ureg.define(u'%RH = [] = percent_relative_humidity = percentRH') ureg.define(u'cubic_feet = ft ** 3 = cu_ft') ureg.define(u'cfm = cu_ft * minute = liter_per_second / 0.4719475') ureg.define(u'cfh = cu_ft * hour') ureg.define(u'cfs = cu_ft * second') ureg.define(u'VAR = volt * ampere') ureg.define(u'kVAR = 1000 * volt * ampere') ureg.define(u'MVAR = 1000000 * volt * ampere') ureg.define(u'inH2O = in_H2O') ureg.define(u'dry_air = []') ureg.define(u'gas = []') ureg.define(u'energy_efficiency_ratio = [] = EER') ureg.define(u'coefficient_of_performance = [] = COP') ureg.define(u'data_center_infrastructure_efficiency = [] = DCIE') ureg.define(u'power_usage_effectiveness = [] = PUE') ureg.define(u'formazin_nephelometric_unit = [] = fnu') ureg.define(u'nephelometric_turbidity_units = [] = ntu') ureg.define(u'power_factor = [] = PF') ureg.define(u'degree_day_celsius = [] = degdaysC') ureg.define(u'degree_day_farenheit = degree_day_celsius * 9 / 5 = degdaysF') ureg.define(u'footcandle = lumen / sq_ft = ftcd') ureg.define(u'Nm = newton * meter') ureg.define(u'%obsc = [] = percent_obscuration = percentobsc') ureg.define(u'cycle = []') ureg.define(u'cph = cycle / hour') ureg.define(u'cpm = cycle / minute') ureg.define(u'cps = cycle / second') ureg.define(u'hecto_cubic_foot = 100 * cubic_foot') ureg.define(u'tenths_second = second / 10') ureg.define(u'hundredths_second = second / 100') ureg.define(u'australian_dollar = [] = AUD') ureg.define(u'british_pound = [] = GBP = £') ureg.define(u'canadian_dollar = [] = CAD') ureg.define(u'chinese_yuan = [] = CNY = 元') ureg.define(u'emerati_dirham = [] = AED') ureg.define(u'euro = [] = EUR = €') ureg.define(u'indian_rupee = [] = INR = ₹') ureg.define(u'japanese_yen = [] = JPY = ¥') ureg.define(u'russian_ruble = [] = RUB = руб') ureg.define(u'south_korean_won = [] = KRW = ₩') ureg.define(u'swedish_krona = [] = SEK = kr') ureg.define(u'swiss_franc = [] = CHF = Fr') ureg.define(u'taiwan_dollar = [] = TWD') ureg.define(u'us_dollar = [] = USD = $') ureg.define(u'new_israeli_shekel = [] = NIS') return ureg
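A usage sketch for the registry above; the quantities chosen (cfm, kVAR) are arbitrary examples of the custom definitions:

from hszinc.pintutil import define_haystack_units

ureg = define_haystack_units()
flow = 250 * ureg.cfm         # custom cfm definition from above
reactive = 3 * ureg.kVAR
print(flow)
print(reactive.to(ureg.VAR))  # 3 kVAR -> 3000 VAR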
def setupperenergy(self, value): ' Method to set the upper state energy.\n\n Parameters\n ----------\n value : float\n The value to set the upper state energy to.\n\n Returns\n -------\n None\n ' if isinstance(value, float): self.energies[1] = value elif isinstance(value, int): self.energies[1] = float(value) else: raise Exception('Energy must be a number')
-210,451,890,552,821,570
Method to set the upper state energy. Parameters ---------- value : float The value to set the upper state energy to. Returns ------- None
admit/util/Line.py
setupperenergy
astroumd/admit
python
def setupperenergy(self, value): ' Method to set the upper state energy.\n\n Parameters\n ----------\n value : float\n The value to set the upper state energy to.\n\n Returns\n -------\n None\n ' if isinstance(value, float): self.energies[1] = value elif isinstance(value, int): self.energies[1] = float(value) else: raise Exception('Energy must be a number')
def setlowerenergy(self, value): ' Method to set the lower state energy.\n\n Parameters\n ----------\n value : float\n The value to set the lower state energy to.\n\n Returns\n -------\n None\n ' if isinstance(value, float): self.energies[0] = value elif isinstance(value, int): self.energies[0] = float(value) else: raise Exception('Energy must be a number')
-5,033,974,829,888,944,000
Method to set the lower state energy. Parameters ---------- value : float The value to set the lower state energy to. Returns ------- None
admit/util/Line.py
setlowerenergy
astroumd/admit
python
def setlowerenergy(self, value): ' Method to set the lower state energy.\n\n Parameters\n ----------\n value : float\n The value to set the lower state energy to.\n\n Returns\n -------\n None\n ' if isinstance(value, float): self.energies[0] = value elif isinstance(value, int): self.energies[0] = float(value) else: raise Exception('Energy must be a number')
def getlowerenergy(self): ' Method to get the lower state energy.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n Float of the lower state energy.\n\n ' return self.energies[0]
4,097,149,889,143,395,000
Method to get the lower state energy. Parameters ---------- None Returns ------- Float of the lower state energy.
admit/util/Line.py
getlowerenergy
astroumd/admit
python
def getlowerenergy(self): ' Method to get the lower state energy.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n Float of the lower state energy.\n\n ' return self.energies[0]
def getupperenergy(self): ' Method to get the upper state energy.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n Float of the upper state energy.\n\n ' return self.energies[1]
-2,025,381,944,315,756,000
Method to get the upper state energy. Parameters ---------- None Returns ------- Float of the upper state energy.
admit/util/Line.py
getupperenergy
astroumd/admit
python
def getupperenergy(self): ' Method to get the upper state energy.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n Float of the upper state energy.\n\n ' return self.energies[1]
def setkey(self, name='', value=''): '\n set keys, two styles are possible:\n\n 1. name = {key:val} e.g. **setkey({"a":1})**\n\n 2. name = "key", value = val e.g. **setkey("a", 1)**\n\n This method checks the type of the keyword value, as it must\n remain the same. Also new keywords cannot be added.\n\n Parameters\n ----------\n\n name : dictionary or string\n Dictionary of keyword value pais to set or a string with the name\n of a single key\n\n value : any\n The value to change the keyword to\n\n Returns\n -------\n None\n ' if isinstance(name, dict): for (k, v) in name.iteritems(): if hasattr(self, k): if (type(v) == type(getattr(self, k))): if ((k == 'energies') and (not isinstance(v, list)) and (len(v) != 2)): raise Exception('Energies must be a list in the format [lower, upper], use setupperenergy or setlowerenergy to set them individually.') setattr(self, k, v) else: raise Exception(('Cannot change data type for %s, expected %s but got %s' % (k, str(type(getattr(self, k))), str(type(v))))) else: raise Exception(('Invalid key given to Line class: %s' % k)) elif (not (name == '')): if hasattr(self, name): if (type(value) == type(getattr(self, name))): if ((name == 'energies') and (not isinstance(value, list)) and (len(value) != 2)): raise Exception('Energies must be a list in the format [lower, upper], use setupperenergy or setlowerenergy to set them individually.') setattr(self, name, value) else: raise Exception(('Cannot change data type for %s, expected %s but got %s' % (name, str(type(getattr(self, name))), str(type(value))))) else: raise Exception(('Invalid key given to Line class: %s' % name)) else: raise Exception('Invalid name parameter given, it must be a string or a dictionary of keys:values.')
8,722,881,559,404,423,000
set keys, two styles are possible:

1. name = {key:val} e.g. **setkey({"a":1})**

2. name = "key", value = val e.g. **setkey("a", 1)**

This method checks the type of the keyword value, as it must remain the same. Also new keywords cannot be added.

Parameters
----------

name : dictionary or string
    Dictionary of keyword value pairs to set or a string with the name of a single key

value : any
    The value to change the keyword to

Returns
-------
None
admit/util/Line.py
setkey
astroumd/admit
python
def setkey(self, name='', value=''): '\n set keys, two styles are possible:\n\n 1. name = {key:val} e.g. **setkey({"a":1})**\n\n 2. name = "key", value = val e.g. **setkey("a", 1)**\n\n This method checks the type of the keyword value, as it must\n remain the same. Also new keywords cannot be added.\n\n Parameters\n ----------\n\n name : dictionary or string\n Dictionary of keyword value pairs to set or a string with the name\n of a single key\n\n value : any\n The value to change the keyword to\n\n Returns\n -------\n None\n ' if isinstance(name, dict): for (k, v) in name.iteritems(): if hasattr(self, k): if (type(v) == type(getattr(self, k))): if ((k == 'energies') and (not isinstance(v, list)) and (len(v) != 2)): raise Exception('Energies must be a list in the format [lower, upper], use setupperenergy or setlowerenergy to set them individually.') setattr(self, k, v) else: raise Exception(('Cannot change data type for %s, expected %s but got %s' % (k, str(type(getattr(self, k))), str(type(v))))) else: raise Exception(('Invalid key given to Line class: %s' % k)) elif (not (name == '')): if hasattr(self, name): if (type(value) == type(getattr(self, name))): if ((name == 'energies') and (not isinstance(value, list)) and (len(value) != 2)): raise Exception('Energies must be a list in the format [lower, upper], use setupperenergy or setlowerenergy to set them individually.') setattr(self, name, value) else: raise Exception(('Cannot change data type for %s, expected %s but got %s' % (name, str(type(getattr(self, name))), str(type(value))))) else: raise Exception(('Invalid key given to Line class: %s' % name)) else: raise Exception('Invalid name parameter given, it must be a string or a dictionary of keys:values.')
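Both call styles accepted by setkey, as a hedged sketch; "line" is assumed to be an initialized Line instance whose "energies" attribute is already a two-element list of floats:

# "line" is an assumed, already-initialized Line instance.
line.setkey('energies', [5.5, 16.6])      # single key and value
line.setkey({'energies': [5.5, 16.6]})    # dict of key/value pairs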
def isequal(self, line): ' Experimental method to compare 2 line classes\n\n Parameters\n ----------\n line : Line\n The class to compare this one to.\n\n Returns\n -------\n Boolean whether or not the two classes contain the same data.\n\n ' try: for i in self.__dict__: if (cmp(getattr(self, i), getattr(line, i)) != 0): return False except: return False return True
-6,666,949,688,399,691,000
Experimental method to compare two Line objects.

Parameters
----------
line : Line
    The instance to compare this one to.

Returns
-------
Boolean whether or not the two objects contain the same data.
admit/util/Line.py
isequal
astroumd/admit
python
def isequal(self, line): ' Experimental method to compare 2 line classes\n\n Parameters\n ----------\n line : Line\n The class to compare this one to.\n\n Returns\n -------\n Boolean whether or not the two classes contain the same data.\n\n ' try: for i in self.__dict__: if (cmp(getattr(self, i), getattr(line, i)) != 0): return False except: return False return True
def _hash_bucket(df: pd.DataFrame, subset: Optional[Sequence[str]], num_buckets: int): '\n Categorize each row of `df` based on the data in the columns `subset`\n into `num_buckets` values. This is based on `pandas.util.hash_pandas_object`\n ' if (not subset): subset = df.columns hash_arr = pd.util.hash_pandas_object(df[subset], index=False) buckets = (hash_arr % num_buckets) available_bit_widths = np.array([8, 16, 32, 64]) mask = (available_bit_widths > np.log2(num_buckets)) bit_width = min(available_bit_widths[mask]) return df.assign(**{_KTK_HASH_BUCKET: buckets.astype(f'uint{bit_width}')})
-6,590,359,751,248,936,000
Categorize each row of `df` based on the data in the columns `subset` into `num_buckets` values. This is based on `pandas.util.hash_pandas_object`
kartothek/io/dask/_shuffle.py
_hash_bucket
MartinHaffner/kartothek
python
def _hash_bucket(df: pd.DataFrame, subset: Optional[Sequence[str]], num_buckets: int): '\n Categorize each row of `df` based on the data in the columns `subset`\n into `num_buckets` values. This is based on `pandas.util.hash_pandas_object`\n ' if (not subset): subset = df.columns hash_arr = pd.util.hash_pandas_object(df[subset], index=False) buckets = (hash_arr % num_buckets) available_bit_widths = np.array([8, 16, 32, 64]) mask = (available_bit_widths > np.log2(num_buckets)) bit_width = min(available_bit_widths[mask]) return df.assign(**{_KTK_HASH_BUCKET: buckets.astype(f'uint{bit_width}')})
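The bucketing logic above, restated as a standalone snippet with an invented two-column frame:

import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3, 4], 'b': list('xyzx')})
num_buckets = 4
# Stable row hash over the subset, folded into num_buckets buckets.
buckets = pd.util.hash_pandas_object(df[['b']], index=False) % num_buckets
# Pick the narrowest unsigned dtype that can hold the bucket IDs.
widths = np.array([8, 16, 32, 64])
bit_width = min(widths[widths > np.log2(num_buckets)])
print(df.assign(bucket=buckets.astype('uint%d' % bit_width)))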
def shuffle_store_dask_partitions(ddf: dd.DataFrame, table: str, secondary_indices: List[str], metadata_version: int, partition_on: List[str], store_factory: StoreFactory, df_serializer: Optional[DataFrameSerializer], dataset_uuid: str, num_buckets: int, sort_partitions_by: List[str], bucket_by: Sequence[str]) -> da.Array: '\n Perform a dataset update with dask reshuffling to control partitioning.\n\n The shuffle operation will perform the following steps\n\n 1. Pack payload data\n\n Payload data is serialized and compressed into a single byte value using\n ``distributed.protocol.serialize_bytes``, see also ``pack_payload``.\n\n 2. Apply bucketing\n\n Hash the column subset ``bucket_by`` and distribute the hashes in\n ``num_buckets`` bins/buckets. Internally every bucket is identified by an\n integer and we will create one physical file for every bucket ID. The\n bucket ID is not exposed to the user and is dropped after the shuffle,\n before the store. This is done since we do not want to guarantee at the\n moment, that the hash function remains stable.\n\n 3. Perform shuffle (dask.DataFrame.groupby.apply)\n\n The groupby key will be the combination of ``partition_on`` fields and the\n hash bucket ID. This will create a physical file for every unique tuple\n in ``partition_on + bucket_ID``. The function which is applied to the\n dataframe will perform all necessary subtask for storage of the dataset\n (partition_on, index calc, etc.).\n\n 4. Unpack data (within the apply-function)\n\n After the shuffle, the first step is to unpack the payload data since\n the follow up tasks will require the full dataframe.\n\n 5. Pre storage processing and parquet serialization\n\n We apply important pre storage processing like sorting data, applying\n final partitioning (at this time there should be only one group in the\n payload data but using the ``MetaPartition.partition_on`` guarantees the\n appropriate data structures kartothek expects are created.).\n After the preprocessing is done, the data is serialized and stored as\n parquet. The applied function will return an (empty) MetaPartition with\n indices and metadata which will then be used to commit the dataset.\n\n Returns\n -------\n\n A dask.Array holding relevant MetaPartition objects as values\n\n ' if (ddf.npartitions == 0): return ddf group_cols = partition_on.copy() if (num_buckets is None): raise ValueError('``num_buckets`` must not be None when shuffling data.') meta = ddf._meta meta[_KTK_HASH_BUCKET] = np.uint64(0) ddf = ddf.map_partitions(_hash_bucket, bucket_by, num_buckets, meta=meta) group_cols.append(_KTK_HASH_BUCKET) unpacked_meta = ddf._meta ddf = pack_payload(ddf, group_key=group_cols) ddf_grouped = ddf.groupby(by=group_cols) unpack = partial(_unpack_store_partition, secondary_indices=secondary_indices, sort_partitions_by=sort_partitions_by, table=table, dataset_uuid=dataset_uuid, partition_on=partition_on, store_factory=store_factory, df_serializer=df_serializer, metadata_version=metadata_version, unpacked_meta=unpacked_meta) return cast(da.Array, ddf_grouped.apply(unpack, meta=('MetaPartition', 'object')))
7,840,959,128,910,960,000
Perform a dataset update with dask reshuffling to control partitioning.

The shuffle operation will perform the following steps

1. Pack payload data

Payload data is serialized and compressed into a single byte value using ``distributed.protocol.serialize_bytes``, see also ``pack_payload``.

2. Apply bucketing

Hash the column subset ``bucket_by`` and distribute the hashes in ``num_buckets`` bins/buckets. Internally every bucket is identified by an integer and we will create one physical file for every bucket ID. The bucket ID is not exposed to the user and is dropped after the shuffle, before the store. This is done since we do not want to guarantee at the moment, that the hash function remains stable.

3. Perform shuffle (dask.DataFrame.groupby.apply)

The groupby key will be the combination of ``partition_on`` fields and the hash bucket ID. This will create a physical file for every unique tuple in ``partition_on + bucket_ID``. The function which is applied to the dataframe will perform all necessary subtasks for storage of the dataset (partition_on, index calc, etc.).

4. Unpack data (within the apply-function)

After the shuffle, the first step is to unpack the payload data since the follow-up tasks will require the full dataframe.

5. Pre storage processing and parquet serialization

We apply important pre storage processing like sorting data, applying final partitioning (at this time there should be only one group in the payload data but using the ``MetaPartition.partition_on`` guarantees the appropriate data structures kartothek expects are created.). After the preprocessing is done, the data is serialized and stored as parquet. The applied function will return an (empty) MetaPartition with indices and metadata which will then be used to commit the dataset.

Returns
-------

A dask.Array holding relevant MetaPartition objects as values
kartothek/io/dask/_shuffle.py
shuffle_store_dask_partitions
MartinHaffner/kartothek
python
def shuffle_store_dask_partitions(
    ddf: dd.DataFrame,
    table: str,
    secondary_indices: List[str],
    metadata_version: int,
    partition_on: List[str],
    store_factory: StoreFactory,
    df_serializer: Optional[DataFrameSerializer],
    dataset_uuid: str,
    num_buckets: int,
    sort_partitions_by: List[str],
    bucket_by: Sequence[str],
) -> da.Array:
    if ddf.npartitions == 0:
        return ddf

    group_cols = partition_on.copy()

    if num_buckets is None:
        raise ValueError('``num_buckets`` must not be None when shuffling data.')

    meta = ddf._meta
    meta[_KTK_HASH_BUCKET] = np.uint64(0)
    ddf = ddf.map_partitions(_hash_bucket, bucket_by, num_buckets, meta=meta)
    group_cols.append(_KTK_HASH_BUCKET)

    unpacked_meta = ddf._meta
    ddf = pack_payload(ddf, group_key=group_cols)
    ddf_grouped = ddf.groupby(by=group_cols)

    unpack = partial(
        _unpack_store_partition,
        secondary_indices=secondary_indices,
        sort_partitions_by=sort_partitions_by,
        table=table,
        dataset_uuid=dataset_uuid,
        partition_on=partition_on,
        store_factory=store_factory,
        df_serializer=df_serializer,
        metadata_version=metadata_version,
        unpacked_meta=unpacked_meta,
    )
    return cast(da.Array, ddf_grouped.apply(unpack, meta=('MetaPartition', 'object')))
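The bucketing step (step 2 in the docstring above) is easiest to see in isolation. Below is a minimal, illustrative sketch of the idea; kartothek's actual ``_hash_bucket`` is internal, so the function name, the marker-column name, and the hashing details here are assumptions, not the library's implementation.

import numpy as np
import pandas as pd

_KTK_HASH_BUCKET = '__KTK_HASH_BUCKET'  # assumed marker column name, for illustration only

def hash_bucket_sketch(df: pd.DataFrame, bucket_by, num_buckets: int) -> pd.DataFrame:
    # Hash the ``bucket_by`` column subset row-wise and fold the hashes into
    # ``num_buckets`` bins; each bin later maps to one physical file.
    hashes = pd.util.hash_pandas_object(df[list(bucket_by)], index=False)
    out = df.copy()
    out[_KTK_HASH_BUCKET] = (hashes % num_buckets).astype(np.uint64)
    return out

# e.g. hash_bucket_sketch(pd.DataFrame({'user': ['a', 'b', 'c']}), ['user'], num_buckets=4)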
def _unpack_store_partition(
    df: pd.DataFrame,
    secondary_indices: List[str],
    sort_partitions_by: List[str],
    table: str,
    dataset_uuid: str,
    partition_on: List[str],
    store_factory: StoreFactory,
    df_serializer: DataFrameSerializer,
    metadata_version: int,
    unpacked_meta: pd.DataFrame,
) -> MetaPartition:
    """Unpack payload data and store partition"""
    df = unpack_payload_pandas(df, unpacked_meta)
    if _KTK_HASH_BUCKET in df:
        df = df.drop(_KTK_HASH_BUCKET, axis=1)
    return write_partition(
        partition_df=df,
        secondary_indices=secondary_indices,
        sort_partitions_by=sort_partitions_by,
        dataset_table_name=table,
        dataset_uuid=dataset_uuid,
        partition_on=partition_on,
        store_factory=store_factory,
        df_serializer=df_serializer,
        metadata_version=metadata_version,
    )
-6,418,627,830,148,800,000
Unpack payload data and store partition
kartothek/io/dask/_shuffle.py
_unpack_store_partition
MartinHaffner/kartothek
python
def _unpack_store_partition(
    df: pd.DataFrame,
    secondary_indices: List[str],
    sort_partitions_by: List[str],
    table: str,
    dataset_uuid: str,
    partition_on: List[str],
    store_factory: StoreFactory,
    df_serializer: DataFrameSerializer,
    metadata_version: int,
    unpacked_meta: pd.DataFrame,
) -> MetaPartition:
    df = unpack_payload_pandas(df, unpacked_meta)
    if _KTK_HASH_BUCKET in df:
        df = df.drop(_KTK_HASH_BUCKET, axis=1)
    return write_partition(
        partition_df=df,
        secondary_indices=secondary_indices,
        sort_partitions_by=sort_partitions_by,
        dataset_table_name=table,
        dataset_uuid=dataset_uuid,
        partition_on=partition_on,
        store_factory=store_factory,
        df_serializer=df_serializer,
        metadata_version=metadata_version,
    )
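The pack/unpack pair that ``_unpack_store_partition`` undoes reduces each partition to a single byte blob for the shuffle. Here is a hedged round-trip sketch of that idea using the ``distributed.protocol`` helpers the docstring above names; kartothek's ``pack_payload`` / ``unpack_payload_pandas`` add dask-aware metadata handling on top of this, so treat the sketch as the concept, not the library's code.

import pandas as pd
from distributed.protocol import serialize_bytes, deserialize_bytes

def pack_sketch(df: pd.DataFrame) -> bytes:
    # One opaque value per partition keeps the shuffle cheap: dask only has
    # to move bytes, not column-wise dataframe chunks.
    return serialize_bytes(df)

def unpack_sketch(blob: bytes) -> pd.DataFrame:
    return deserialize_bytes(blob)

df = pd.DataFrame({'a': [1, 2, 3]})
assert unpack_sketch(pack_sketch(df)).equals(df)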
@app.get('/todoer/v1/tasks', status_code=200)
async def root(
    request: Request,
    database=Depends(get_database),
    pagination: Tuple[int, int] = Depends(pagination),
) -> dict:
    """GET tasks as an HTML page"""
    tasks = await database.get_all(*pagination)
    return TEMPLATES.TemplateResponse('index.html', {'request': request, 'tasks': tasks})
6,508,956,269,686,269,000
GET tasks as an HTML page
todoer_api/app/main.py
root
owlsong/todoer
python
@app.get('/todoer/v1/tasks', status_code=200)
async def root(
    request: Request,
    database=Depends(get_database),
    pagination: Tuple[int, int] = Depends(pagination),
) -> dict:
    tasks = await database.get_all(*pagination)
    return TEMPLATES.TemplateResponse('index.html', {'request': request, 'tasks': tasks})
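The ``pagination`` dependency that ``root`` consumes is not part of this record; a plausible minimal version is sketched below. The parameter names, defaults, and bounds are assumptions for illustration, not the repository's code.

from typing import Tuple
from fastapi import Query

async def pagination(
    skip: int = Query(0, ge=0),            # offset into the task list (assumed name)
    limit: int = Query(10, ge=0, le=100),  # page size, capped at 100 (assumed bound)
) -> Tuple[int, int]:
    # The returned tuple is unpacked into ``database.get_all(skip, limit)``.
    return (skip, limit)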
def data_generator(path, batch_size=8, input_shape=96, scale=2):
    """data generator for fit_generator"""
    fns = os.listdir(path)
    n = len(fns)
    i = 0
    while True:
        lrs, hrs = [], []
        for b in range(batch_size):
            if i == 0:
                np.random.shuffle(fns)
            fn = os.path.join(path, fns[i])
            lr, hr = utils.pair(fn, input_shape, scale)
            lr = utils.normalization(lr)
            hr = utils.normalization(hr)
            lrs.append(lr)
            hrs.append(hr)
            i = (i + 1) % n
        yield np.array(lrs), np.array(hrs)
-6,014,064,260,644,129,000
data generator for fit_generator
src/train.py
data_generator
zhaipro/keras-wdsr
python
def data_generator(path, batch_size=8, input_shape=96, scale=2):
    fns = os.listdir(path)
    n = len(fns)
    i = 0
    while True:
        lrs, hrs = [], []
        for b in range(batch_size):
            if i == 0:
                np.random.shuffle(fns)
            fn = os.path.join(path, fns[i])
            lr, hr = utils.pair(fn, input_shape, scale)
            lr = utils.normalization(lr)
            hr = utils.normalization(hr)
            lrs.append(lr)
            hrs.append(hr)
            i = (i + 1) % n
        yield np.array(lrs), np.array(hrs)
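A hedged usage sketch for the generator above; the dataset path and the Keras model are assumptions, and the exact batch shapes depend on what ``utils.pair`` returns.

# The generator yields (low-res batch, high-res batch) tuples forever,
# reshuffling the file list once per pass over the directory.
gen = data_generator('datasets/train', batch_size=8, input_shape=96, scale=2)
lrs, hrs = next(gen)
print(lrs.shape, hrs.shape)  # e.g. (8, 48, 48, 3) and (8, 96, 96, 3), depending on utils.pair

# Typical Keras training call (``model`` is an assumption, not part of this repo):
# model.fit(gen, steps_per_epoch=1000, epochs=10)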
@property
def peers(self) -> Mapping[bytes, Peer]:
    """Returns a read-only copy of peers."""
    with self.lock:
        return self._peers.copy()
-9,164,602,136,683,299,000
Returns a read-only copy of peers.
electrum/lnworker.py
peers
jeroz1/electrum-ravencoin-utd
python
@property
def peers(self) -> Mapping[bytes, Peer]:
    with self.lock:
        return self._peers.copy()
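The property above is an instance of a snapshot-under-lock pattern that also appears in ``channels`` and ``channel_backups`` further down: hold the lock only long enough to copy the dict, so callers can iterate the snapshot without racing concurrent mutation. A self-contained sketch of the pattern (the class and names are illustrative, not electrum's):

import threading
from typing import Dict, Mapping

class PeerRegistry:
    def __init__(self) -> None:
        self.lock = threading.RLock()
        self._peers: Dict[bytes, object] = {}

    @property
    def peers(self) -> Mapping[bytes, object]:
        # Shallow copy under the lock: the returned dict never changes,
        # though the Peer objects themselves are still shared.
        with self.lock:
            return self._peers.copy()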
def get_node_alias(self, node_id: bytes) -> Optional[str]:
    """Returns the alias of the node, or None if unknown."""
    node_alias = None
    if self.channel_db:
        node_info = self.channel_db.get_node_info_for_node_id(node_id)
        if node_info:
            node_alias = node_info.alias
    else:
        for k, v in hardcoded_trampoline_nodes().items():
            if v.pubkey == node_id:
                node_alias = k
                break
    return node_alias
115,511,388,044,440,000
Returns the alias of the node, or None if unknown.
electrum/lnworker.py
get_node_alias
jeroz1/electrum-ravencoin-utd
python
def get_node_alias(self, node_id: bytes) -> Optional[str]:
    node_alias = None
    if self.channel_db:
        node_info = self.channel_db.get_node_info_for_node_id(node_id)
        if node_info:
            node_alias = node_info.alias
    else:
        for k, v in hardcoded_trampoline_nodes().items():
            if v.pubkey == node_id:
                node_alias = k
                break
    return node_alias
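A hedged usage sketch for the lookup above; the ``lnworker`` instance and the 33-byte pubkey are fabricated purely for illustration.

# Falls back gracefully when neither the channel DB nor the hardcoded
# trampoline list knows the node.
node_id = bytes.fromhex('02' + '11' * 32)  # made-up compressed pubkey
alias = lnworker.get_node_alias(node_id)   # ``lnworker`` is assumed in scope
print(alias or '<unknown node>')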
def get_sync_progress_estimate(self) -> Tuple[Optional[int], Optional[int], Optional[int]]:
    """Estimates the gossip synchronization progress and returns the number
    of synchronized channels, the total channels in the network and a
    rescaled percentage of the synchronization progress."""
    if self.num_peers() == 0:
        return None, None, None
    nchans_with_0p, nchans_with_1p, nchans_with_2p = (
        self.channel_db.get_num_channels_partitioned_by_policy_count()
    )
    num_db_channels = nchans_with_0p + nchans_with_1p + nchans_with_2p
    # Channels without any policy are known but not yet synced.
    current_est = num_db_channels - nchans_with_0p
    total_est = len(self.unknown_ids) + num_db_channels
    progress = current_est / total_est if total_est and current_est else 0
    # Stretch by 1/0.95 so ~95% raw completion already reads as 100%.
    progress_percent = (1.0 / 0.95) * progress * 100
    progress_percent = min(progress_percent, 100)
    progress_percent = round(progress_percent)
    if current_est < 200:
        # Too few synced channels to give a meaningful estimate.
        progress_percent = 0
    return current_est, total_est, progress_percent
3,173,733,034,705,334,300
Estimates the gossip synchronization progress and returns the number of synchronized channels, the total channels in the network and a rescaled percentage of the synchronization progress.
electrum/lnworker.py
get_sync_progress_estimate
jeroz1/electrum-ravencoin-utd
python
def get_sync_progress_estimate(self) -> Tuple[Optional[int], Optional[int], Optional[int]]:
    if self.num_peers() == 0:
        return None, None, None
    nchans_with_0p, nchans_with_1p, nchans_with_2p = (
        self.channel_db.get_num_channels_partitioned_by_policy_count()
    )
    num_db_channels = nchans_with_0p + nchans_with_1p + nchans_with_2p
    # Channels without any policy are known but not yet synced.
    current_est = num_db_channels - nchans_with_0p
    total_est = len(self.unknown_ids) + num_db_channels
    progress = current_est / total_est if total_est and current_est else 0
    # Stretch by 1/0.95 so ~95% raw completion already reads as 100%.
    progress_percent = (1.0 / 0.95) * progress * 100
    progress_percent = min(progress_percent, 100)
    progress_percent = round(progress_percent)
    if current_est < 200:
        # Too few synced channels to give a meaningful estimate.
        progress_percent = 0
    return current_est, total_est, progress_percent
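A worked example of the rescaling in ``get_sync_progress_estimate``: raw progress is stretched by 1/0.95 so that roughly 95% raw completion already reads as 100%, then capped and rounded. The channel counts below are made-up.

current_est, total_est = 40_000, 42_000           # made-up channel counts
progress = current_est / total_est                # ~0.952
progress_percent = min((1.0 / 0.95) * progress * 100, 100)
print(round(progress_percent))                    # -> 100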
@property
def channels(self) -> Mapping[bytes, Channel]:
    """Returns a read-only copy of channels."""
    with self.lock:
        return self._channels.copy()
-6,742,783,748,671,863,000
Returns a read-only copy of channels.
electrum/lnworker.py
channels
jeroz1/electrum-ravencoin-utd
python
@property
def channels(self) -> Mapping[bytes, Channel]:
    with self.lock:
        return self._channels.copy()
@property
def channel_backups(self) -> Mapping[bytes, ChannelBackup]:
    """Returns a read-only copy of channel backups."""
    with self.lock:
        return self._channel_backups.copy()
-5,893,535,997,725,226,000
Returns a read-only copy of channel backups.
electrum/lnworker.py
channel_backups
jeroz1/electrum-ravencoin-utd
python
@property
def channel_backups(self) -> Mapping[bytes, ChannelBackup]:
    with self.lock:
        return self._channel_backups.copy()