Dataset schema (per-record fields):
  repository_name            string (length 5 to 67)
  func_path_in_repository    string (length 4 to 234)
  func_name                  string (length 0 to 314)
  whole_func_string          string (length 52 to 3.87M)
  language                   string (6 classes)
  func_code_string           string (length 52 to 3.87M; duplicate of whole_func_string in the records below)
  func_documentation_string  string (length 1 to 47.2k)
  func_code_url              string (length 85 to 339)
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/api/map_job/model_datastore_input_reader.py
ModelDatastoreInputReader.split_input
def split_input(cls, job_config):
  """Inherit docs."""
  params = job_config.input_reader_params
  shard_count = job_config.shard_count
  query_spec = cls._get_query_spec(params)

  if not property_range.should_shard_by_property_range(query_spec.filters):
    return super(ModelDatastoreInputReader, cls).split_input(job_config)

  p_range = property_range.PropertyRange(query_spec.filters,
                                         query_spec.model_class_path)
  p_ranges = p_range.split(shard_count)

  # User specified a namespace.
  if query_spec.ns:
    ns_range = namespace_range.NamespaceRange(
        namespace_start=query_spec.ns,
        namespace_end=query_spec.ns,
        _app=query_spec.app)
    ns_ranges = [copy.copy(ns_range) for _ in p_ranges]
  else:
    ns_keys = namespace_range.get_namespace_keys(
        query_spec.app, cls.MAX_NAMESPACES_FOR_KEY_SHARD+1)
    if not ns_keys:
      return
    # User doesn't specify ns but the number of ns is small.
    # We still split by property range.
    if len(ns_keys) <= cls.MAX_NAMESPACES_FOR_KEY_SHARD:
      ns_ranges = [namespace_range.NamespaceRange(_app=query_spec.app)
                   for _ in p_ranges]
    # Lots of namespaces. Split by ns.
    else:
      ns_ranges = namespace_range.NamespaceRange.split(n=shard_count,
                                                       contiguous=False,
                                                       can_query=lambda: True,
                                                       _app=query_spec.app)
      p_ranges = [copy.copy(p_range) for _ in ns_ranges]

  assert len(p_ranges) == len(ns_ranges)

  iters = [
      db_iters.RangeIteratorFactory.create_property_range_iterator(
          p, ns, query_spec) for p, ns in zip(p_ranges, ns_ranges)]
  return [cls(i) for i in iters]
python
Inherit docs.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/model_datastore_input_reader.py#L53-L96
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/api/map_job/model_datastore_input_reader.py
ModelDatastoreInputReader.validate
def validate(cls, job_config):
  """Inherit docs."""
  super(ModelDatastoreInputReader, cls).validate(job_config)
  params = job_config.input_reader_params
  entity_kind = params[cls.ENTITY_KIND_PARAM]
  # Fail fast if Model cannot be located.
  try:
    model_class = util.for_name(entity_kind)
  except ImportError, e:
    raise errors.BadReaderParamsError("Bad entity kind: %s" % e)
  if cls.FILTERS_PARAM in params:
    filters = params[cls.FILTERS_PARAM]
    if issubclass(model_class, db.Model):
      cls._validate_filters(filters, model_class)
    else:
      cls._validate_filters_ndb(filters, model_class)
    property_range.PropertyRange(filters, entity_kind)
python
Inherit docs.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/model_datastore_input_reader.py#L99-L115
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/api/map_job/model_datastore_input_reader.py
ModelDatastoreInputReader._validate_filters
def _validate_filters(cls, filters, model_class):
  """Validate user supplied filters.

  Validate filters are on existing properties and filter values
  have valid semantics.

  Args:
    filters: user supplied filters. Each filter should be a list or tuple of
      format (<property_name_as_str>, <query_operator_as_str>,
      <value_of_certain_type>). Value type is up to the property's type.
    model_class: the db.Model class for the entity type to apply filters on.

  Raises:
    BadReaderParamsError: if any filter is invalid in any way.
  """
  if not filters:
    return

  properties = model_class.properties()

  for f in filters:
    prop, _, val = f
    if prop not in properties:
      raise errors.BadReaderParamsError(
          "Property %s is not defined for entity type %s",
          prop, model_class.kind())

    # Validate the value of each filter. We need to know filters have
    # valid value to carry out splits.
    try:
      properties[prop].validate(val)
    except db.BadValueError, e:
      raise errors.BadReaderParamsError(e)
python
Validate user supplied filters. Validate filters are on existing properties and filter values have valid semantics. Args: filters: user supplied filters. Each filter should be a list or tuple of format (<property_name_as_str>, <query_operator_as_str>, <value_of_certain_type>). Value type is up to the property's type. model_class: the db.Model class for the entity type to apply filters on. Raises: BadReaderParamsError: if any filter is invalid in any way.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/model_datastore_input_reader.py#L118-L150
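To make the expected filter format concrete, here is a hedged, illustrative sketch; the GuestbookEntry model and its property names are invented for this example and are not part of the library.

# Illustrative only: a hypothetical db.Model plus filters in the
# (<property>, <operator>, <value>) shape that _validate_filters expects.
from google.appengine.ext import db

class GuestbookEntry(db.Model):  # hypothetical model, not part of the library
  author = db.StringProperty()
  rating = db.IntegerProperty()

good_filters = [("author", "=", "alice"), ("rating", ">=", 3)]
bad_property = [("missing_prop", "=", 1)]     # not defined -> BadReaderParamsError
bad_value = [("rating", ">=", "not-an-int")]  # fails properties[prop].validate(val)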
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/api/map_job/model_datastore_input_reader.py
ModelDatastoreInputReader._validate_filters_ndb
def _validate_filters_ndb(cls, filters, model_class):
  """Validate ndb.Model filters."""
  if not filters:
    return

  properties = model_class._properties

  for f in filters:
    prop, _, val = f
    if prop not in properties:
      raise errors.BadReaderParamsError(
          "Property %s is not defined for entity type %s",
          prop, model_class._get_kind())

    # Validate the value of each filter. We need to know filters have
    # valid value to carry out splits.
    try:
      properties[prop]._do_validate(val)
    except db.BadValueError, e:
      raise errors.BadReaderParamsError(e)
python
Validate ndb.Model filters.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/model_datastore_input_reader.py#L154-L173
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/context.py
_normalize_entity
def _normalize_entity(value):
  """Return an entity from an entity or model instance."""
  if ndb is not None and isinstance(value, ndb.Model):
    return None
  if getattr(value, "_populate_internal_entity", None):
    return value._populate_internal_entity()
  return value
python
Return an entity from an entity or model instance.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/context.py#L71-L77
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/context.py
_normalize_key
def _normalize_key(value):
  """Return a key from an entity, model instance, key, or key string."""
  if ndb is not None and isinstance(value, (ndb.Model, ndb.Key)):
    return None
  if getattr(value, "key", None):
    return value.key()
  elif isinstance(value, basestring):
    return datastore.Key(value)
  else:
    return value
python
Return a key from an entity, model instance, key, or key string.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/context.py#L80-L89
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/context.py
_ItemList.append
def append(self, item):
  """Add new item to the list.

  If needed, append will first flush existing items and clear existing items.

  Args:
    item: an item to add to the list.
  """
  if self.should_flush():
    self.flush()
  self.items.append(item)
python
Add new item to the list. If needed, append will first flush existing items and clear existing items. Args: item: an item to add to the list.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/context.py#L133-L143
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/context.py
_ItemList.flush
def flush(self):
  """Force a flush."""
  if not self.items:
    return

  retry = 0
  options = {"deadline": DATASTORE_DEADLINE}
  while retry <= self.__timeout_retries:
    try:
      self.__flush_function(self.items, options)
      self.clear()
      break
    except db.Timeout, e:
      logging.warning(e)
      logging.warning("Flushing '%s' timed out. Will retry for the %s time.",
                      self, retry)
      retry += 1
      options["deadline"] *= 2
    except apiproxy_errors.RequestTooLargeError:
      self._log_largest_items()
      raise
  else:
    raise
python
Force a flush.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/context.py#L145-L167
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/context.py
_MutationPool.put
def put(self, entity):
  """Registers entity to put to datastore.

  Args:
    entity: an entity or model instance to put.
  """
  actual_entity = _normalize_entity(entity)
  if actual_entity is None:
    return self.ndb_put(entity)
  self.puts.append(actual_entity)
python
Registers entity to put to datastore. Args: entity: an entity or model instance to put.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/context.py#L249-L258
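For context, a hedged sketch of how a map handler typically feeds this pool: user code normally yields datastore operations (assuming the library's mapreduce.operation API) rather than calling _MutationPool directly, and the framework buffers them here so datastore calls are batched. The handler and entity attribute below are invented.

# A minimal sketch, assuming the mapreduce.operation API.
from mapreduce import operation as op

def touch_entity(entity):
  """Hypothetical map function: mark each entity and re-save it."""
  entity.processed = True
  yield op.db.Put(entity)       # buffered in the shard's mutation pool
  # yield op.db.Delete(entity)  # deletes are pooled the same way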
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/context.py
_MutationPool.ndb_put
def ndb_put(self, entity):
  """Like put(), but for NDB entities."""
  assert ndb is not None and isinstance(entity, ndb.Model)
  self.ndb_puts.append(entity)
python
Like put(), but for NDB entities.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/context.py#L260-L263
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/context.py
_MutationPool.delete
def delete(self, entity):
  """Registers entity to delete from datastore.

  Args:
    entity: an entity, model instance, or key to delete.
  """
  key = _normalize_key(entity)
  if key is None:
    return self.ndb_delete(entity)
  self.deletes.append(key)
python
Registers entity to delete from datastore. Args: entity: an entity, model instance, or key to delete.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/context.py#L265-L274
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/context.py
_MutationPool.ndb_delete
def ndb_delete(self, entity_or_key):
  """Like delete(), but for NDB entities/keys."""
  if ndb is not None and isinstance(entity_or_key, ndb.Model):
    key = entity_or_key.key
  else:
    key = entity_or_key
  self.ndb_deletes.append(key)
python
Like delete(), but for NDB entities/keys.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/context.py#L276-L282
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/context.py
_MutationPool.flush
def flush(self):
  """Flush (apply) all changes to datastore."""
  self.puts.flush()
  self.deletes.flush()
  self.ndb_puts.flush()
  self.ndb_deletes.flush()
python
Flush (apply) all changes to datastore.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/context.py#L284-L289
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/context.py
_MutationPool._flush_puts
def _flush_puts(self, items, options):
  """Flush all puts to datastore."""
  datastore.Put(items, config=self._create_config(options))
python
Flush all puts to datastore.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/context.py#L315-L317
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/context.py
_MutationPool._flush_deletes
def _flush_deletes(self, items, options):
  """Flush all deletes to datastore."""
  datastore.Delete(items, config=self._create_config(options))
python
Flush all deletes to datastore.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/context.py#L319-L321
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/context.py
_MutationPool._flush_ndb_puts
def _flush_ndb_puts(self, items, options):
  """Flush all NDB puts to datastore."""
  assert ndb is not None
  ndb.put_multi(items, config=self._create_config(options))
python
Flush all NDB puts to datastore.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/context.py#L323-L326
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/context.py
_MutationPool._flush_ndb_deletes
def _flush_ndb_deletes(self, items, options):
  """Flush all deletes to datastore."""
  assert ndb is not None
  ndb.delete_multi(items, config=self._create_config(options))
python
Flush all deletes to datastore.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/context.py#L328-L331
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/context.py
_MutationPool._create_config
def _create_config(self, options):
  """Creates datastore Config.

  Returns:
    A datastore_rpc.Configuration instance.
  """
  return datastore.CreateConfig(deadline=options["deadline"],
                                force_writes=self.force_writes)
python
Creates datastore Config. Returns: A datastore_rpc.Configuration instance.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/context.py#L333-L340
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/context.py
_Counters.increment
def increment(self, counter_name, delta=1):
  """Increment counter value.

  Args:
    counter_name: name of the counter as string.
    delta: increment delta as int.
  """
  self._shard_state.counters_map.increment(counter_name, delta)
python
Increment counter value. Args: counter_name: name of the counter as string. delta: increment delta as int.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/context.py#L358-L365
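A hedged usage sketch: handlers normally bump counters through the operation API (assuming mapreduce.operation.counters.Increment), which ultimately lands in this increment() call; the counter name and entity attribute below are invented for illustration.

# A minimal sketch, assuming the mapreduce.operation API.
from mapreduce import operation as op

def count_words(entity):
  """Hypothetical map function that tallies a custom counter."""
  words = len(getattr(entity, "text", "").split())
  yield op.counters.Increment("word_count", words)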
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/datastore_range_iterators.py
_PropertyRangeModelIterator.to_json
def to_json(self):
  """Inherit doc."""
  cursor = self._cursor
  if self._query is not None:
    if isinstance(self._query, db.Query):
      cursor = self._query.cursor()
    else:
      cursor = self._query.cursor_after()

  if cursor is None or isinstance(cursor, basestring):
    cursor_object = False
  else:
    cursor_object = True
    cursor = cursor.to_websafe_string()

  return {"property_range": self._property_range.to_json(),
          "query_spec": self._query_spec.to_json(),
          "cursor": cursor,
          "ns_range": self._ns_range.to_json_object(),
          "name": self.__class__.__name__,
          "cursor_object": cursor_object}
python
Inherit doc.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/datastore_range_iterators.py#L195-L215
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/datastore_range_iterators.py
_PropertyRangeModelIterator.from_json
def from_json(cls, json):
  """Inherit doc."""
  obj = cls(property_range.PropertyRange.from_json(json["property_range"]),
            namespace_range.NamespaceRange.from_json_object(json["ns_range"]),
            model.QuerySpec.from_json(json["query_spec"]))

  cursor = json["cursor"]
  # lint bug. Class method can access protected fields.
  # pylint: disable=protected-access
  if cursor and json["cursor_object"]:
    obj._cursor = datastore_query.Cursor.from_websafe_string(cursor)
  else:
    obj._cursor = cursor
  return obj
python
Inherit doc.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/datastore_range_iterators.py#L222-L234
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/datastore_range_iterators.py
_MultiPropertyRangeModelIterator.to_json
def to_json(self):
  """Inherit doc."""
  json = {"name": self.__class__.__name__,
          "num_ranges": len(self._iters)}

  for i in xrange(len(self._iters)):
    json_item = self._iters[i].to_json()
    query_spec = json_item["query_spec"]
    item_name = json_item["name"]

    # Delete and move one level up
    del json_item["query_spec"]
    del json_item["name"]
    json[str(i)] = json_item

  # Store once to save space
  json["query_spec"] = query_spec
  json["item_name"] = item_name

  return json
python
Inherit doc.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/datastore_range_iterators.py#L262-L279
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/datastore_range_iterators.py
_MultiPropertyRangeModelIterator.from_json
def from_json(cls, json):
  """Inherit doc."""
  num_ranges = int(json["num_ranges"])
  query_spec = json["query_spec"]
  item_name = json["item_name"]

  p_range_iters = []
  for i in xrange(num_ranges):
    json_item = json[str(i)]
    # Place query_spec, name back into each iterator
    json_item["query_spec"] = query_spec
    json_item["name"] = item_name
    p_range_iters.append(_PropertyRangeModelIterator.from_json(json_item))

  obj = cls(p_range_iters)
  return obj
python
Inherit doc.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/datastore_range_iterators.py#L282-L297
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/datastore_range_iterators.py
_KeyRangesIterator.to_json
def to_json(self):
  """Inherit doc."""
  current_iter = None
  if self._current_iter:
    current_iter = self._current_iter.to_json()

  return {"key_ranges": self._key_ranges.to_json(),
          "query_spec": self._query_spec.to_json(),
          "current_iter": current_iter,
          "key_range_iter_cls": self._key_range_iter_cls.__name__,
          "name": self.__class__.__name__}
python
Inherit doc.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/datastore_range_iterators.py#L339-L349
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/datastore_range_iterators.py
_KeyRangesIterator.from_json
def from_json(cls, json):
  """Inherit doc."""
  key_range_iter_cls = _KEY_RANGE_ITERATORS[json["key_range_iter_cls"]]
  obj = cls(key_ranges.KeyRangesFactory.from_json(json["key_ranges"]),
            model.QuerySpec.from_json(json["query_spec"]),
            key_range_iter_cls)

  current_iter = None
  if json["current_iter"]:
    current_iter = key_range_iter_cls.from_json(json["current_iter"])

  # pylint: disable=protected-access
  obj._current_iter = current_iter
  return obj
python
Inherit doc.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/datastore_range_iterators.py#L352-L364
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/datastore_range_iterators.py
AbstractKeyRangeIterator.to_json
def to_json(self):
  """Serializes all states into json form.

  Returns:
    all states in json-compatible map.
  """
  cursor = self._get_cursor()
  cursor_object = False
  if cursor and isinstance(cursor, datastore_query.Cursor):
    cursor = cursor.to_websafe_string()
    cursor_object = True
  return {"key_range": self._key_range.to_json(),
          "query_spec": self._query_spec.to_json(),
          "cursor": cursor,
          "cursor_object": cursor_object}
python
Serializes all states into json form. Returns: all states in json-compatible map.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/datastore_range_iterators.py#L405-L419
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/datastore_range_iterators.py
AbstractKeyRangeIterator.from_json
def from_json(cls, json):
  """Reverse of to_json."""
  obj = cls(key_range.KeyRange.from_json(json["key_range"]),
            model.QuerySpec.from_json(json["query_spec"]))
  cursor = json["cursor"]
  # lint bug. Class method can access protected fields.
  # pylint: disable=protected-access
  if cursor and json["cursor_object"]:
    obj._cursor = datastore_query.Cursor.from_websafe_string(cursor)
  else:
    obj._cursor = cursor
  return obj
python
Reverse of to_json.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/datastore_range_iterators.py#L422-L433
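A small sketch of the checkpoint round trip this to_json()/from_json() pair supports when a shard's progress is saved and later resumed; it assumes nothing beyond the two methods shown above.

def checkpoint_roundtrip(it):
  """Serialize an iterator and rebuild an equivalent one from the JSON dict."""
  state = it.to_json()                   # plain dict, safe to store with shard state
  restored = it.__class__.from_json(state)
  return restored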
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/status.py
find_mapreduce_yaml
def find_mapreduce_yaml(status_file=__file__):
  """Traverse directory trees to find mapreduce.yaml file.

  Begins with the location of status.py and then moves on to check the
  working directory.

  Args:
    status_file: location of status.py, overridable for testing purposes.

  Returns:
    the path of mapreduce.yaml file or None if not found.
  """
  checked = set()
  yaml = _find_mapreduce_yaml(os.path.dirname(status_file), checked)
  if not yaml:
    yaml = _find_mapreduce_yaml(os.getcwd(), checked)
  return yaml
python
Traverse directory trees to find mapreduce.yaml file. Begins with the location of status.py and then moves on to check the working directory. Args: status_file: location of status.py, overridable for testing purposes. Returns: the path of mapreduce.yaml file or None if not found.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/status.py#L161-L177
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/status.py
_find_mapreduce_yaml
def _find_mapreduce_yaml(start, checked):
  """Traverse the directory tree identified by start until a directory already
  in checked is encountered or the path of mapreduce.yaml is found.

  Checked is present both to make loop termination easy to reason about and so
  that the same directories do not get rechecked.

  Args:
    start: the path to start in and work upward from
    checked: the set of already examined directories

  Returns:
    the path of mapreduce.yaml file or None if not found.
  """
  dir = start
  while dir not in checked:
    checked.add(dir)
    for mr_yaml_name in MR_YAML_NAMES:
      yaml_path = os.path.join(dir, mr_yaml_name)
      if os.path.exists(yaml_path):
        return yaml_path
    dir = os.path.dirname(dir)
  return None
python
Traverse the directory tree identified by start until a directory already in checked is encountered or the path of mapreduce.yaml is found. Checked is present both to make loop termination easy to reason about and so that the same directories do not get rechecked. Args: start: the path to start in and work upward from checked: the set of already examined directories Returns: the path of mapreduce.yaml file or None if not found.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/status.py#L180-L202
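A short note on why the upward walk terminates, assuming a POSIX path layout: once the search reaches the filesystem root, os.path.dirname returns the root again, which is already in `checked`, so the while loop exits.

import os
# Starting from a directory such as /app/handlers, the loop examines
# /app/handlers, /app, and / for each candidate name in MR_YAML_NAMES.
assert os.path.dirname("/") == "/"   # root maps to itself, so `checked` stops the loop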
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/status.py
parse_mapreduce_yaml
def parse_mapreduce_yaml(contents):
  """Parses mapreduce.yaml file contents.

  Args:
    contents: mapreduce.yaml file contents.

  Returns:
    MapReduceYaml object with all the data from original file.

  Raises:
    errors.BadYamlError: when contents is not a valid mapreduce.yaml file.
  """
  try:
    builder = yaml_object.ObjectBuilder(MapReduceYaml)
    handler = yaml_builder.BuilderHandler(builder)
    listener = yaml_listener.EventListener(handler)
    listener.Parse(contents)

    mr_info = handler.GetResults()
  except (ValueError, yaml_errors.EventError), e:
    raise errors.BadYamlError(e)

  if len(mr_info) < 1:
    raise errors.BadYamlError("No configs found in mapreduce.yaml")
  if len(mr_info) > 1:
    raise errors.MultipleDocumentsInMrYaml("Found %d YAML documents" %
                                           len(mr_info))

  jobs = mr_info[0]
  job_names = set(j.name for j in jobs.mapreduce)
  if len(jobs.mapreduce) != len(job_names):
    raise errors.BadYamlError(
        "Overlapping mapreduce names; names must be unique")

  return jobs
python
Parses mapreduce.yaml file contents. Args: contents: mapreduce.yaml file contents. Returns: MapReduceYaml object with all the data from original file. Raises: errors.BadYamlError: when contents is not a valid mapreduce.yaml file.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/status.py#L205-L239
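An illustrative input for this parser; the job name, handler path, reader path, and entity kind below are placeholders, and the document shape follows the usual mapreduce.yaml conventions for this library.

SAMPLE_YAML = """
mapreduce:
- name: WordCount
  mapper:
    input_reader: mapreduce.input_readers.DatastoreInputReader
    handler: main.word_count_map
    params:
    - name: entity_kind
      default: main.Page
"""

jobs = parse_mapreduce_yaml(SAMPLE_YAML)
assert jobs.mapreduce[0].name == "WordCount"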
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/status.py
get_mapreduce_yaml
def get_mapreduce_yaml(parse=parse_mapreduce_yaml):
  """Locates mapreduce.yaml, loads and parses its info.

  Args:
    parse: Used for testing.

  Returns:
    MapReduceYaml object.

  Raises:
    errors.BadYamlError: when contents is not a valid mapreduce.yaml file or
    the file is missing.
  """
  mr_yaml_path = find_mapreduce_yaml()
  if not mr_yaml_path:
    raise errors.MissingYamlError()
  mr_yaml_file = open(mr_yaml_path)
  try:
    return parse(mr_yaml_file.read())
  finally:
    mr_yaml_file.close()
python
Locates mapreduce.yaml, loads and parses its info. Args: parse: Used for testing. Returns: MapReduceYaml object. Raises: errors.BadYamlError: when contents is not a valid mapreduce.yaml file or the file is missing.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/status.py#L242-L262
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/status.py
MapReduceYaml.to_dict
def to_dict(mapreduce_yaml):
  """Converts a MapReduceYaml file into a JSON-encodable dictionary.

  For use in user-visible UI and internal methods for interfacing with
  user code (like param validation).

  Args:
    mapreduce_yaml: The Python representation of the mapreduce.yaml document.

  Returns:
    A list of configuration dictionaries.
  """
  all_configs = []
  for config in mapreduce_yaml.mapreduce:
    out = {
        "name": config.name,
        "mapper_input_reader": config.mapper.input_reader,
        "mapper_handler": config.mapper.handler,
    }
    if config.mapper.params_validator:
      out["mapper_params_validator"] = config.mapper.params_validator
    if config.mapper.params:
      param_defaults = {}
      for param in config.mapper.params:
        param_defaults[param.name] = param.default or param.value
      out["mapper_params"] = param_defaults
    if config.params:
      param_defaults = {}
      for param in config.params:
        param_defaults[param.name] = param.default or param.value
      out["params"] = param_defaults
    if config.mapper.output_writer:
      out["mapper_output_writer"] = config.mapper.output_writer
    all_configs.append(out)

  return all_configs
python
Converts a MapReduceYaml file into a JSON-encodable dictionary. For use in user-visible UI and internal methods for interfacing with user code (like param validation). Args: mapreduce_yaml: The Python representation of the mapreduce.yaml document. Returns: A list of configuration dictionaries.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/status.py#L120-L155
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/api/map_job/input_reader.py
InputReader.validate
def validate(cls, job_config):
  """Validates relevant parameters.

  This method can validate fields which it deems relevant.

  Args:
    job_config: an instance of map_job.JobConfig.

  Raises:
    errors.BadReaderParamsError: required parameters are missing or invalid.
  """
  if job_config.input_reader_cls != cls:
    raise errors.BadReaderParamsError(
        "Expect input reader class %r, got %r." %
        (cls, job_config.input_reader_cls))
python
Validates relevant parameters. This method can validate fields which it deems relevant. Args: job_config: an instance of map_job.JobConfig. Raises: errors.BadReaderParamsError: required parameters are missing or invalid.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/input_reader.py#L104-L118
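A hedged sketch of how a custom reader built on this API might chain to the base validate() before checking its own parameters; the class name and batch_size parameter are invented for illustration and assume the module's InputReader and errors imports.

class MyLineInputReader(InputReader):  # hypothetical subclass
  BATCH_SIZE_PARAM = "batch_size"

  @classmethod
  def validate(cls, job_config):
    # Base check: the job was configured with this reader class.
    super(MyLineInputReader, cls).validate(job_config)
    params = job_config.input_reader_params
    if params.get(cls.BATCH_SIZE_PARAM, 1) <= 0:
      raise errors.BadReaderParamsError("batch_size must be positive")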
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/shuffler.py
_sort_records_map
def _sort_records_map(records):
  """Map function sorting records.

  Converts records to KeyValue protos, sorts them by key and writes them
  into new GCS file. Creates _OutputFile entity to record resulting
  file name.

  Args:
    records: list of records which are serialized KeyValue protos.
  """
  ctx = context.get()
  l = len(records)
  key_records = [None] * l

  logging.debug("Parsing")
  for i in range(l):
    proto = kv_pb.KeyValue()
    proto.ParseFromString(records[i])
    key_records[i] = (proto.key(), records[i])

  logging.debug("Sorting")
  key_records.sort(cmp=_compare_keys)

  logging.debug("Writing")
  mapper_spec = ctx.mapreduce_spec.mapper
  params = input_readers._get_params(mapper_spec)
  bucket_name = params.get("bucket_name")
  filename = (ctx.mapreduce_spec.name + "/" + ctx.mapreduce_id + "/output-" +
              ctx.shard_id + "-" + str(int(time.time())))
  full_filename = "/%s/%s" % (bucket_name, filename)
  filehandle = cloudstorage.open(full_filename, mode="w")
  with output_writers.GCSRecordsPool(filehandle, ctx=ctx) as pool:
    for key_record in key_records:
      pool.append(key_record[1])

  logging.debug("Finalizing")
  filehandle.close()

  entity = _OutputFile(key_name=full_filename,
                       parent=_OutputFile.get_root_key(ctx.mapreduce_id))
  entity.put()
python
Map function sorting records. Converts records to KeyValue protos, sorts them by key and writes them into new GCS file. Creates _OutputFile entity to record resulting file name. Args: records: list of records which are serialized KeyValue protos.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/shuffler.py#L124-L164
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/shuffler.py
_merge_map
def _merge_map(key, values, partial):
  """A map function used in merge phase.

  Stores (key, values) into KeyValues proto and yields its serialization.

  Args:
    key: values key.
    values: values themselves.
    partial: True if more values for this key will follow. False otherwise.

  Yields:
    The proto.
  """
  proto = kv_pb.KeyValues()
  proto.set_key(key)
  proto.value_list().extend(values)
  yield proto.Encode()
python
A map function used in merge phase. Stores (key, values) into KeyValues proto and yields its serialization. Args: key: values key. values: values themselves. partial: True if more values for this key will follow. False otherwise. Yields: The proto.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/shuffler.py#L561-L577
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/shuffler.py
_hashing_map
def _hashing_map(binary_record):
  """A map function used in hash phase.

  Reads KeyValue from binary record.

  Args:
    binary_record: The binary record.

  Yields:
    The (key, value).
  """
  proto = kv_pb.KeyValue()
  proto.ParseFromString(binary_record)

  yield (proto.key(), proto.value())
python
A map function used in hash phase. Reads KeyValue from binary record. Args: binary_record: The binary record. Yields: The (key, value).
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/shuffler.py#L618-L631
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/shuffler.py
_MergingReader.split_input
def split_input(cls, mapper_spec):
  """Split input into multiple shards."""
  filelists = mapper_spec.params[cls.FILES_PARAM]
  max_values_count = mapper_spec.params.get(cls.MAX_VALUES_COUNT_PARAM, -1)
  max_values_size = mapper_spec.params.get(cls.MAX_VALUES_SIZE_PARAM, -1)
  return [cls([0] * len(files), max_values_count, max_values_size)
          for files in filelists]
python
Split input into multiple shards.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/shuffler.py#L386-L392
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/shuffler.py
_MergingReader.validate
def validate(cls, mapper_spec):
  """Validate reader parameters in mapper_spec."""
  if mapper_spec.input_reader_class() != cls:
    raise errors.BadReaderParamsError("Input reader class mismatch")
  params = mapper_spec.params
  if cls.FILES_PARAM not in params:
    raise errors.BadReaderParamsError("Missing files parameter.")
python
Validate reader parameters in mapper_spec.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/shuffler.py#L395-L401
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/shuffler.py
_HashingGCSOutputWriter.validate
def validate(cls, mapper_spec):
  """Validates mapper specification.

  Args:
    mapper_spec: an instance of model.MapperSpec to validate.

  Raises:
    BadWriterParamsError: when Output writer class mismatch.
  """
  if mapper_spec.output_writer_class() != cls:
    raise errors.BadWriterParamsError("Output writer class mismatch")
  params = output_writers._get_params(mapper_spec)
  # Bucket Name is required
  if cls.BUCKET_NAME_PARAM not in params:
    raise errors.BadWriterParamsError(
        "%s is required for the _HashingGCSOutputWriter" %
        cls.BUCKET_NAME_PARAM)
python
Validates mapper specification. Args: mapper_spec: an instance of model.MapperSpec to validate. Raises: BadWriterParamsError: when Output writer class mismatch.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/shuffler.py#L429-L444
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/shuffler.py
_HashingGCSOutputWriter.to_json
def to_json(self):
  """Returns writer state to serialize in json.

  Returns:
    A json-izable version of the OutputWriter state.
  """
  # Use the member variable (since we don't have access to the context) to
  # flush each pool to minimize the size of each filehandle before we
  # serialize it.
  for pool in self._pools:
    if pool is not None:
      pool.flush(True)
  return {"filehandles": pickle.dumps(self._filehandles)}
python
Returns writer state to serialize in json. Returns: A json-izable version of the OutputWriter state.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/shuffler.py#L458-L470
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/shuffler.py
_HashingGCSOutputWriter.create
def create(cls, mr_spec, shard_number, shard_attempt, _writer_state=None):
  """Inherit docs."""
  mapper_spec = mr_spec.mapper
  params = output_writers._get_params(mapper_spec)
  bucket_name = params.get(cls.BUCKET_NAME_PARAM)
  shards = mapper_spec.shard_count

  filehandles = []
  filename = (mr_spec.name + "/" + mr_spec.mapreduce_id +
              "/shard-" + str(shard_number) + "-bucket-")
  for i in range(shards):
    full_filename = "/%s/%s%d" % (bucket_name, filename, i)
    filehandles.append(cloudstorage.open(full_filename, mode="w"))
  return cls(filehandles)
python
Inherit docs.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/shuffler.py#L473-L486
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/shuffler.py
_HashingGCSOutputWriter.get_filenames
def get_filenames(cls, mapreduce_state):
  """See parent class."""
  shards = mapreduce_state.mapreduce_spec.mapper.shard_count
  filenames = []
  for _ in range(shards):
    filenames.append([None] * shards)
  shard_states = model.ShardState.find_all_by_mapreduce_state(mapreduce_state)
  for x, shard_state in enumerate(shard_states):
    shard_filenames = shard_state.writer_state["shard_filenames"]
    for y in range(shards):
      filenames[y][x] = shard_filenames[y]
  return filenames
python
See parent class.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/shuffler.py#L489-L500
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/shuffler.py
_HashingGCSOutputWriter.finalize
def finalize(self, ctx, shard_state):
  """See parent class."""
  filenames = []
  for filehandle in self._filehandles:
    filenames.append(filehandle.name)
    filehandle.close()
  shard_state.writer_state = {"shard_filenames": filenames}
python
See parent class.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/shuffler.py#L502-L508
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/shuffler.py
_HashingGCSOutputWriter.write
def write(self, data):
  """Write data.

  Args:
    data: actual data yielded from handler. Type is writer-specific.
  """
  ctx = context.get()
  if len(data) != 2:
    logging.error("Got bad tuple of length %d (2-tuple expected): %s",
                  len(data), data)

  try:
    key = str(data[0])
    value = str(data[1])
  except TypeError:
    logging.error("Expecting a tuple, but got %s: %s",
                  data.__class__.__name__, data)

  file_index = key.__hash__() % len(self._filehandles)

  # Work-around: Since we don't have access to the context in the to_json()
  # function, but we need to flush each pool before we serialize the
  # filehandle, we rely on a member variable instead of using context for
  # pool management.
  pool = self._pools[file_index]
  if pool is None:
    filehandle = self._filehandles[file_index]
    pool = output_writers.GCSRecordsPool(filehandle=filehandle, ctx=ctx)
    self._pools[file_index] = pool

  proto = kv_pb.KeyValue()
  proto.set_key(key)
  proto.set_value(value)
  pool.append(proto.Encode())
python
Write data. Args: data: actual data yielded from handler. Type is writer-specific.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/shuffler.py#L510-L543
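A tiny standalone illustration of the partitioning rule used above, independent of the GCS plumbing: a record's destination file is chosen by hashing its stringified key modulo the number of output files, so identical keys always land in the same bucket file.

def pick_file_index(key, num_files):
  """Stable bucket choice for a key, mirroring key.__hash__() % len(files)."""
  return hash(str(key)) % num_files

assert pick_file_index("user:42", 4) == pick_file_index("user:42", 4)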
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/api/map_job/map_job_config.py
JobConfig._get_mapper_params
def _get_mapper_params(self):
  """Converts self to model.MapperSpec.params."""
  reader_params = self.input_reader_cls.params_to_json(
      self.input_reader_params)
  # TODO(user): Do the same for writer params.
  return {"input_reader": reader_params,
          "output_writer": self.output_writer_params}
python
def _get_mapper_params(self): """Converts self to model.MapperSpec.params.""" reader_params = self.input_reader_cls.params_to_json( self.input_reader_params) # TODO(user): Do the same for writer params. return {"input_reader": reader_params, "output_writer": self.output_writer_params}
Converts self to model.MapperSpec.params.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/map_job_config.py#L114-L120
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/api/map_job/map_job_config.py
JobConfig._get_mapper_spec
def _get_mapper_spec(self):
  """Converts self to model.MapperSpec."""
  # pylint: disable=g-import-not-at-top
  from mapreduce import model

  return model.MapperSpec(
      handler_spec=util._obj_to_path(self.mapper),
      input_reader_spec=util._obj_to_path(self.input_reader_cls),
      params=self._get_mapper_params(),
      shard_count=self.shard_count,
      output_writer_spec=util._obj_to_path(self.output_writer_cls))
python
def _get_mapper_spec(self):
  """Converts self to model.MapperSpec."""
  # pylint: disable=g-import-not-at-top
  from mapreduce import model

  return model.MapperSpec(
      handler_spec=util._obj_to_path(self.mapper),
      input_reader_spec=util._obj_to_path(self.input_reader_cls),
      params=self._get_mapper_params(),
      shard_count=self.shard_count,
      output_writer_spec=util._obj_to_path(self.output_writer_cls))
Converts self to model.MapperSpec.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/map_job_config.py#L122-L132
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/api/map_job/map_job_config.py
JobConfig._get_mr_params
def _get_mr_params(self):
  """Converts self to model.MapreduceSpec.params."""
  return {"force_writes": self._force_writes,
          "done_callback": self.done_callback_url,
          "user_params": self.user_params,
          "shard_max_attempts": self.shard_max_attempts,
          "task_max_attempts": self._task_max_attempts,
          "task_max_data_processing_attempts":
              self._task_max_data_processing_attempts,
          "queue_name": self.queue_name,
          "base_path": self._base_path,
          "app_id": self._app,
          "api_version": self._api_version}
python
def _get_mr_params(self):
  """Converts self to model.MapreduceSpec.params."""
  return {"force_writes": self._force_writes,
          "done_callback": self.done_callback_url,
          "user_params": self.user_params,
          "shard_max_attempts": self.shard_max_attempts,
          "task_max_attempts": self._task_max_attempts,
          "task_max_data_processing_attempts":
              self._task_max_data_processing_attempts,
          "queue_name": self.queue_name,
          "base_path": self._base_path,
          "app_id": self._app,
          "api_version": self._api_version}
Converts self to model.MapreduceSpec.params.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/map_job_config.py#L134-L146
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/api/map_job/map_job_config.py
JobConfig._get_default_mr_params
def _get_default_mr_params(cls):
  """Gets default values for old API."""
  cfg = cls(_lenient=True)
  mr_params = cfg._get_mr_params()
  mr_params["api_version"] = 0
  return mr_params
python
def _get_default_mr_params(cls):
  """Gets default values for old API."""
  cfg = cls(_lenient=True)
  mr_params = cfg._get_mr_params()
  mr_params["api_version"] = 0
  return mr_params
Gets default values for old API.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/map_job_config.py#L154-L159
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/api/map_job/map_job_config.py
JobConfig._to_map_job_config
def _to_map_job_config(cls,
                       mr_spec,
                       # TODO(user): Remove this parameter after it can be
                       # read from mr_spec.
                       queue_name):
  """Converts model.MapreduceSpec back to JobConfig.

  This method allows our internal methods to use JobConfig directly.
  This method also allows us to expose JobConfig as an API during execution,
  despite that it is not saved into datastore.

  Args:
    mr_spec: model.MapreduceSpec.
    queue_name: queue name.

  Returns:
    The JobConfig object for this job.
  """
  mapper_spec = mr_spec.mapper
  # 0 means all the old APIs before api_version is introduced.
  api_version = mr_spec.params.get("api_version", 0)
  old_api = api_version == 0

  # Deserialize params from json if input_reader/output_writer are new API.
  input_reader_cls = mapper_spec.input_reader_class()
  input_reader_params = input_readers._get_params(mapper_spec)
  if issubclass(input_reader_cls, input_reader.InputReader):
    input_reader_params = input_reader_cls.params_from_json(
        input_reader_params)

  output_writer_cls = mapper_spec.output_writer_class()
  output_writer_params = output_writers._get_params(mapper_spec)
  # TODO(user): Call json (de)serialization for writer.
  # if (output_writer_cls and
  #     issubclass(output_writer_cls, output_writer.OutputWriter)):
  #   output_writer_params = output_writer_cls.params_from_json(
  #       output_writer_params)

  # We can not always convert MapreduceSpec generated by older API
  # to JobConfig. Thus, mr framework should use/expose the returned JobConfig
  # object with caution when a job is started with an old API.
  # In this case, this method only tries not to blow up and assemble a
  # JobConfig object as accurate as possible.
  return cls(_lenient=old_api,
             job_name=mr_spec.name,
             job_id=mr_spec.mapreduce_id,
             # handler_spec from older API may not have map_job.Mapper type.
             mapper=util.for_name(mapper_spec.handler_spec),
             input_reader_cls=input_reader_cls,
             input_reader_params=input_reader_params,
             output_writer_cls=output_writer_cls,
             output_writer_params=output_writer_params,
             shard_count=mapper_spec.shard_count,
             queue_name=queue_name,
             user_params=mr_spec.params.get("user_params"),
             shard_max_attempts=mr_spec.params.get("shard_max_attempts"),
             done_callback_url=mr_spec.params.get("done_callback"),
             _force_writes=mr_spec.params.get("force_writes"),
             _base_path=mr_spec.params["base_path"],
             _task_max_attempts=mr_spec.params.get("task_max_attempts"),
             _task_max_data_processing_attempts=(
                 mr_spec.params.get("task_max_data_processing_attempts")),
             _hooks_cls=util.for_name(mr_spec.hooks_class_name),
             _app=mr_spec.params.get("app_id"),
             _api_version=api_version)
python
def _to_map_job_config(cls,
                       mr_spec,
                       # TODO(user): Remove this parameter after it can be
                       # read from mr_spec.
                       queue_name):
  """Converts model.MapreduceSpec back to JobConfig.

  This method allows our internal methods to use JobConfig directly.
  This method also allows us to expose JobConfig as an API during execution,
  despite that it is not saved into datastore.

  Args:
    mr_spec: model.MapreduceSpec.
    queue_name: queue name.

  Returns:
    The JobConfig object for this job.
  """
  mapper_spec = mr_spec.mapper
  # 0 means all the old APIs before api_version is introduced.
  api_version = mr_spec.params.get("api_version", 0)
  old_api = api_version == 0

  # Deserialize params from json if input_reader/output_writer are new API.
  input_reader_cls = mapper_spec.input_reader_class()
  input_reader_params = input_readers._get_params(mapper_spec)
  if issubclass(input_reader_cls, input_reader.InputReader):
    input_reader_params = input_reader_cls.params_from_json(
        input_reader_params)

  output_writer_cls = mapper_spec.output_writer_class()
  output_writer_params = output_writers._get_params(mapper_spec)
  # TODO(user): Call json (de)serialization for writer.
  # if (output_writer_cls and
  #     issubclass(output_writer_cls, output_writer.OutputWriter)):
  #   output_writer_params = output_writer_cls.params_from_json(
  #       output_writer_params)

  # We can not always convert MapreduceSpec generated by older API
  # to JobConfig. Thus, mr framework should use/expose the returned JobConfig
  # object with caution when a job is started with an old API.
  # In this case, this method only tries not to blow up and assemble a
  # JobConfig object as accurate as possible.
  return cls(_lenient=old_api,
             job_name=mr_spec.name,
             job_id=mr_spec.mapreduce_id,
             # handler_spec from older API may not have map_job.Mapper type.
             mapper=util.for_name(mapper_spec.handler_spec),
             input_reader_cls=input_reader_cls,
             input_reader_params=input_reader_params,
             output_writer_cls=output_writer_cls,
             output_writer_params=output_writer_params,
             shard_count=mapper_spec.shard_count,
             queue_name=queue_name,
             user_params=mr_spec.params.get("user_params"),
             shard_max_attempts=mr_spec.params.get("shard_max_attempts"),
             done_callback_url=mr_spec.params.get("done_callback"),
             _force_writes=mr_spec.params.get("force_writes"),
             _base_path=mr_spec.params["base_path"],
             _task_max_attempts=mr_spec.params.get("task_max_attempts"),
             _task_max_data_processing_attempts=(
                 mr_spec.params.get("task_max_data_processing_attempts")),
             _hooks_cls=util.for_name(mr_spec.hooks_class_name),
             _app=mr_spec.params.get("app_id"),
             _api_version=api_version)
Converts model.MapreduceSpec back to JobConfig. This method allows our internal methods to use JobConfig directly. This method also allows us to expose JobConfig as an API during execution, despite that it is not saved into datastore. Args: mr_spec: model.MapreduceSpec. queue_name: queue name. Returns: The JobConfig object for this job.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/map_job_config.py#L162-L226
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/records.py
RecordsWriter.__write_record
def __write_record(self, record_type, data):
  """Write single physical record."""
  length = len(data)

  crc = crc32c.crc_update(crc32c.CRC_INIT, [record_type])
  crc = crc32c.crc_update(crc, data)
  crc = crc32c.crc_finalize(crc)

  self.__writer.write(
      struct.pack(_HEADER_FORMAT, _mask_crc(crc), length, record_type))
  self.__writer.write(data)
  self.__position += _HEADER_LENGTH + length
python
def __write_record(self, record_type, data):
  """Write single physical record."""
  length = len(data)

  crc = crc32c.crc_update(crc32c.CRC_INIT, [record_type])
  crc = crc32c.crc_update(crc, data)
  crc = crc32c.crc_finalize(crc)

  self.__writer.write(
      struct.pack(_HEADER_FORMAT, _mask_crc(crc), length, record_type))
  self.__writer.write(data)
  self.__position += _HEADER_LENGTH + length
Write single physical record.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/records.py#L160-L171
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/records.py
RecordsWriter.write
def write(self, data):
  """Write single record.

  Args:
    data: record data to write as string, byte array or byte sequence.
  """
  block_remaining = _BLOCK_SIZE - self.__position % _BLOCK_SIZE

  if block_remaining < _HEADER_LENGTH:
    # Header won't fit into remainder
    self.__writer.write('\x00' * block_remaining)
    self.__position += block_remaining
    block_remaining = _BLOCK_SIZE

  if block_remaining < len(data) + _HEADER_LENGTH:
    first_chunk = data[:block_remaining - _HEADER_LENGTH]
    self.__write_record(_RECORD_TYPE_FIRST, first_chunk)
    data = data[len(first_chunk):]

    while True:
      block_remaining = _BLOCK_SIZE - self.__position % _BLOCK_SIZE
      if block_remaining >= len(data) + _HEADER_LENGTH:
        self.__write_record(_RECORD_TYPE_LAST, data)
        break
      else:
        chunk = data[:block_remaining - _HEADER_LENGTH]
        self.__write_record(_RECORD_TYPE_MIDDLE, chunk)
        data = data[len(chunk):]
  else:
    self.__write_record(_RECORD_TYPE_FULL, data)
python
def write(self, data):
  """Write single record.

  Args:
    data: record data to write as string, byte array or byte sequence.
  """
  block_remaining = _BLOCK_SIZE - self.__position % _BLOCK_SIZE

  if block_remaining < _HEADER_LENGTH:
    # Header won't fit into remainder
    self.__writer.write('\x00' * block_remaining)
    self.__position += block_remaining
    block_remaining = _BLOCK_SIZE

  if block_remaining < len(data) + _HEADER_LENGTH:
    first_chunk = data[:block_remaining - _HEADER_LENGTH]
    self.__write_record(_RECORD_TYPE_FIRST, first_chunk)
    data = data[len(first_chunk):]

    while True:
      block_remaining = _BLOCK_SIZE - self.__position % _BLOCK_SIZE
      if block_remaining >= len(data) + _HEADER_LENGTH:
        self.__write_record(_RECORD_TYPE_LAST, data)
        break
      else:
        chunk = data[:block_remaining - _HEADER_LENGTH]
        self.__write_record(_RECORD_TYPE_MIDDLE, chunk)
        data = data[len(chunk):]
  else:
    self.__write_record(_RECORD_TYPE_FULL, data)
Write single record. Args: data: record data to write as string, byte array or byte sequence.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/records.py#L173-L202
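A hedged usage sketch for the writer above: RecordsWriter only calls write() on the object it wraps, so an in-memory buffer is enough for illustration. The RecordsWriter(writer) constructor form is an assumption based on how self.__writer is used here, not something shown in this listing.
import io

from mapreduce import records

buf = io.BytesIO()
writer = records.RecordsWriter(buf)   # assumed constructor signature
for payload in ("first record", "second record", "x" * 100000):
  writer.write(payload)               # long payloads are split across blocks
writer._pad_block()                   # zero-fill the trailing block
serialized = buf.getvalue()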
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/records.py
RecordsWriter._pad_block
def _pad_block(self):
  """Pad block with 0.

  Pad current block with 0. Reader will simply treat these as corrupted
  record and skip the block.

  This method is idempotent.
  """
  pad_length = _BLOCK_SIZE - self.__position % _BLOCK_SIZE
  if pad_length and pad_length != _BLOCK_SIZE:
    self.__writer.write('\x00' * pad_length)
    self.__position += pad_length
python
def _pad_block(self):
  """Pad block with 0.

  Pad current block with 0. Reader will simply treat these as corrupted
  record and skip the block.

  This method is idempotent.
  """
  pad_length = _BLOCK_SIZE - self.__position % _BLOCK_SIZE
  if pad_length and pad_length != _BLOCK_SIZE:
    self.__writer.write('\x00' * pad_length)
    self.__position += pad_length
Pad block with 0. Pad current block with 0. Reader will simply treat these as corrupted record and skip the block. This method is idempotent.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/records.py#L213-L224
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/records.py
RecordsReader.__try_read_record
def __try_read_record(self):
  """Try reading a record.

  Returns:
    (data, record_type) tuple.

  Raises:
    EOFError: when end of file was reached.
    InvalidRecordError: when valid record could not be read.
  """
  block_remaining = _BLOCK_SIZE - self.__reader.tell() % _BLOCK_SIZE
  if block_remaining < _HEADER_LENGTH:
    return ('', _RECORD_TYPE_NONE)

  header = self.__reader.read(_HEADER_LENGTH)
  if len(header) != _HEADER_LENGTH:
    raise EOFError('Read %s bytes instead of %s' %
                   (len(header), _HEADER_LENGTH))

  (masked_crc, length, record_type) = struct.unpack(_HEADER_FORMAT, header)
  crc = _unmask_crc(masked_crc)

  if length + _HEADER_LENGTH > block_remaining:
    # A record can't be bigger than one block.
    raise errors.InvalidRecordError('Length is too big')

  data = self.__reader.read(length)
  if len(data) != length:
    raise EOFError('Not enough data read. Expected: %s but got %s' %
                   (length, len(data)))

  if record_type == _RECORD_TYPE_NONE:
    return ('', record_type)

  actual_crc = crc32c.crc_update(crc32c.CRC_INIT, [record_type])
  actual_crc = crc32c.crc_update(actual_crc, data)
  actual_crc = crc32c.crc_finalize(actual_crc)

  if actual_crc != crc:
    raise errors.InvalidRecordError('Data crc does not match')
  return (data, record_type)
python
def __try_read_record(self):
  """Try reading a record.

  Returns:
    (data, record_type) tuple.

  Raises:
    EOFError: when end of file was reached.
    InvalidRecordError: when valid record could not be read.
  """
  block_remaining = _BLOCK_SIZE - self.__reader.tell() % _BLOCK_SIZE
  if block_remaining < _HEADER_LENGTH:
    return ('', _RECORD_TYPE_NONE)

  header = self.__reader.read(_HEADER_LENGTH)
  if len(header) != _HEADER_LENGTH:
    raise EOFError('Read %s bytes instead of %s' %
                   (len(header), _HEADER_LENGTH))

  (masked_crc, length, record_type) = struct.unpack(_HEADER_FORMAT, header)
  crc = _unmask_crc(masked_crc)

  if length + _HEADER_LENGTH > block_remaining:
    # A record can't be bigger than one block.
    raise errors.InvalidRecordError('Length is too big')

  data = self.__reader.read(length)
  if len(data) != length:
    raise EOFError('Not enough data read. Expected: %s but got %s' %
                   (length, len(data)))

  if record_type == _RECORD_TYPE_NONE:
    return ('', record_type)

  actual_crc = crc32c.crc_update(crc32c.CRC_INIT, [record_type])
  actual_crc = crc32c.crc_update(actual_crc, data)
  actual_crc = crc32c.crc_finalize(actual_crc)

  if actual_crc != crc:
    raise errors.InvalidRecordError('Data crc does not match')
  return (data, record_type)
Try reading a record. Returns: (data, record_type) tuple. Raises: EOFError: when end of file was reached. InvalidRecordError: when valid record could not be read.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/records.py#L239-L278
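For orientation, the header this method unpacks follows a leveldb-style log layout. The constant values below are assumptions (they are defined earlier in records.py, outside this excerpt): 32KB blocks and a 7-byte little-endian header of masked crc32c, length, and record type.
import struct

_BLOCK_SIZE = 32 * 1024   # assumed block size
_HEADER_FORMAT = "<IHB"   # assumed: masked crc (4B), length (2B), type (1B)
_HEADER_LENGTH = struct.calcsize(_HEADER_FORMAT)  # 7 bytes


def parse_header(header_bytes):
  """Unpack one physical record header into (masked_crc, length, record_type)."""
  return struct.unpack(_HEADER_FORMAT, header_bytes)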
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/records.py
RecordsReader.__sync
def __sync(self):
  """Skip reader to the block boundary."""
  pad_length = _BLOCK_SIZE - self.__reader.tell() % _BLOCK_SIZE
  if pad_length and pad_length != _BLOCK_SIZE:
    data = self.__reader.read(pad_length)
    if len(data) != pad_length:
      raise EOFError('Read %d bytes instead of %d' %
                     (len(data), pad_length))
python
def __sync(self):
  """Skip reader to the block boundary."""
  pad_length = _BLOCK_SIZE - self.__reader.tell() % _BLOCK_SIZE
  if pad_length and pad_length != _BLOCK_SIZE:
    data = self.__reader.read(pad_length)
    if len(data) != pad_length:
      raise EOFError('Read %d bytes instead of %d' %
                     (len(data), pad_length))
Skip reader to the block boundary.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/records.py#L280-L287
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/records.py
RecordsReader.read
def read(self):
  """Reads record from current position in reader.

  Returns:
    original bytes stored in a single record.
  """
  data = None
  while True:
    last_offset = self.tell()
    try:
      (chunk, record_type) = self.__try_read_record()
      if record_type == _RECORD_TYPE_NONE:
        self.__sync()
      elif record_type == _RECORD_TYPE_FULL:
        if data is not None:
          logging.warning(
              "Ordering corruption: Got FULL record while already "
              "in a chunk at offset %d", last_offset)
        return chunk
      elif record_type == _RECORD_TYPE_FIRST:
        if data is not None:
          logging.warning(
              "Ordering corruption: Got FIRST record while already "
              "in a chunk at offset %d", last_offset)
        data = chunk
      elif record_type == _RECORD_TYPE_MIDDLE:
        if data is None:
          logging.warning(
              "Ordering corruption: Got MIDDLE record before FIRST "
              "record at offset %d", last_offset)
        else:
          data += chunk
      elif record_type == _RECORD_TYPE_LAST:
        if data is None:
          logging.warning(
              "Ordering corruption: Got LAST record but no chunk is in "
              "progress at offset %d", last_offset)
        else:
          result = data + chunk
          data = None
          return result
      else:
        raise errors.InvalidRecordError(
            "Unsupported record type: %s" % record_type)

    except errors.InvalidRecordError, e:
      logging.warning("Invalid record encountered at %s (%s). Syncing to "
                      "the next block", last_offset, e)
      data = None
      self.__sync()
python
def read(self):
  """Reads record from current position in reader.

  Returns:
    original bytes stored in a single record.
  """
  data = None
  while True:
    last_offset = self.tell()
    try:
      (chunk, record_type) = self.__try_read_record()
      if record_type == _RECORD_TYPE_NONE:
        self.__sync()
      elif record_type == _RECORD_TYPE_FULL:
        if data is not None:
          logging.warning(
              "Ordering corruption: Got FULL record while already "
              "in a chunk at offset %d", last_offset)
        return chunk
      elif record_type == _RECORD_TYPE_FIRST:
        if data is not None:
          logging.warning(
              "Ordering corruption: Got FIRST record while already "
              "in a chunk at offset %d", last_offset)
        data = chunk
      elif record_type == _RECORD_TYPE_MIDDLE:
        if data is None:
          logging.warning(
              "Ordering corruption: Got MIDDLE record before FIRST "
              "record at offset %d", last_offset)
        else:
          data += chunk
      elif record_type == _RECORD_TYPE_LAST:
        if data is None:
          logging.warning(
              "Ordering corruption: Got LAST record but no chunk is in "
              "progress at offset %d", last_offset)
        else:
          result = data + chunk
          data = None
          return result
      else:
        raise errors.InvalidRecordError(
            "Unsupported record type: %s" % record_type)

    except errors.InvalidRecordError, e:
      logging.warning("Invalid record encountered at %s (%s). Syncing to "
                      "the next block", last_offset, e)
      data = None
      self.__sync()
Reads record from current position in reader. Returns: original bytes stored in a single record.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/records.py#L289-L338
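A small consumption sketch: read() raises EOFError once the input is exhausted, so callers usually drain the reader in a loop. As with the writer sketch earlier, the RecordsReader(reader) constructor form is an assumption based on how self.__reader is used.
from mapreduce import records


def read_all_records(file_like):
  """Return every logical record from a file-like object."""
  reader = records.RecordsReader(file_like)   # assumed constructor signature
  results = []
  while True:
    try:
      results.append(reader.read())
    except EOFError:
      return results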
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/mapper_pipeline.py
MapperPipeline.run
def run(self,
        job_name,
        handler_spec,
        input_reader_spec,
        output_writer_spec=None,
        params=None,
        shards=None,
        base_path=None):
  """Start a mapreduce job.

  Args:
    job_name: mapreduce name. Only for display purpose.
    handler_spec: fully qualified name to your map function/class.
    input_reader_spec: fully qualified name to input reader class.
    output_writer_spec: fully qualified name to output writer class.
    params: a dictionary of parameters for input reader and output writer
      initialization.
    shards: number of shards. This provides a guide to mapreduce. The real
      number of shards is determined by how input are splited.
  """
  if shards is None:
    shards = parameters.config.SHARD_COUNT
  if base_path is None:
    base_path = parameters.config.BASE_PATH
  mapreduce_id = control.start_map(
      job_name,
      handler_spec,
      input_reader_spec,
      params or {},
      mapreduce_parameters={
          "done_callback": self.get_callback_url(),
          "done_callback_method": "GET",
          "pipeline_id": self.pipeline_id,
          "base_path": base_path,
      },
      shard_count=shards,
      output_writer_spec=output_writer_spec,
      queue_name=self.queue_name,
  )
  self.fill(self.outputs.job_id, mapreduce_id)
  self.set_status(console_url="%s/detail?mapreduce_id=%s" % (
      (base_path, mapreduce_id)))
python
def run(self,
        job_name,
        handler_spec,
        input_reader_spec,
        output_writer_spec=None,
        params=None,
        shards=None,
        base_path=None):
  """Start a mapreduce job.

  Args:
    job_name: mapreduce name. Only for display purpose.
    handler_spec: fully qualified name to your map function/class.
    input_reader_spec: fully qualified name to input reader class.
    output_writer_spec: fully qualified name to output writer class.
    params: a dictionary of parameters for input reader and output writer
      initialization.
    shards: number of shards. This provides a guide to mapreduce. The real
      number of shards is determined by how input are splited.
  """
  if shards is None:
    shards = parameters.config.SHARD_COUNT
  if base_path is None:
    base_path = parameters.config.BASE_PATH
  mapreduce_id = control.start_map(
      job_name,
      handler_spec,
      input_reader_spec,
      params or {},
      mapreduce_parameters={
          "done_callback": self.get_callback_url(),
          "done_callback_method": "GET",
          "pipeline_id": self.pipeline_id,
          "base_path": base_path,
      },
      shard_count=shards,
      output_writer_spec=output_writer_spec,
      queue_name=self.queue_name,
  )
  self.fill(self.outputs.job_id, mapreduce_id)
  self.set_status(console_url="%s/detail?mapreduce_id=%s" % (
      (base_path, mapreduce_id)))
Start a mapreduce job. Args: job_name: mapreduce name. Only for display purpose. handler_spec: fully qualified name to your map function/class. input_reader_spec: fully qualified name to input reader class. output_writer_spec: fully qualified name to output writer class. params: a dictionary of parameters for input reader and output writer initialization. shards: number of shards. This provides a guide to mapreduce. The real number of shards is determined by how input are splited.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/mapper_pipeline.py#L65-L106
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/mapper_pipeline.py
MapperPipeline.callback
def callback(self):
  """Callback after this async pipeline finishes."""
  if self.was_aborted:
    return

  mapreduce_id = self.outputs.job_id.value
  mapreduce_state = model.MapreduceState.get_by_job_id(mapreduce_id)
  if mapreduce_state.result_status != model.MapreduceState.RESULT_SUCCESS:
    self.retry("Job %s had status %s" % (
        mapreduce_id, mapreduce_state.result_status))
    return

  mapper_spec = mapreduce_state.mapreduce_spec.mapper
  outputs = []
  output_writer_class = mapper_spec.output_writer_class()
  if (output_writer_class and
      mapreduce_state.result_status == model.MapreduceState.RESULT_SUCCESS):
    outputs = output_writer_class.get_filenames(mapreduce_state)

  self.fill(self.outputs.result_status, mapreduce_state.result_status)
  self.fill(self.outputs.counters, mapreduce_state.counters_map.to_dict())
  self.complete(outputs)
python
def callback(self):
  """Callback after this async pipeline finishes."""
  if self.was_aborted:
    return

  mapreduce_id = self.outputs.job_id.value
  mapreduce_state = model.MapreduceState.get_by_job_id(mapreduce_id)
  if mapreduce_state.result_status != model.MapreduceState.RESULT_SUCCESS:
    self.retry("Job %s had status %s" % (
        mapreduce_id, mapreduce_state.result_status))
    return

  mapper_spec = mapreduce_state.mapreduce_spec.mapper
  outputs = []
  output_writer_class = mapper_spec.output_writer_class()
  if (output_writer_class and
      mapreduce_state.result_status == model.MapreduceState.RESULT_SUCCESS):
    outputs = output_writer_class.get_filenames(mapreduce_state)

  self.fill(self.outputs.result_status, mapreduce_state.result_status)
  self.fill(self.outputs.counters, mapreduce_state.counters_map.to_dict())
  self.complete(outputs)
Callback after this async pipeline finishes.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/mapper_pipeline.py#L112-L133
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/control.py
start_map
def start_map(name,
              handler_spec,
              reader_spec,
              mapper_parameters,
              shard_count=None,
              output_writer_spec=None,
              mapreduce_parameters=None,
              base_path=None,
              queue_name=None,
              eta=None,
              countdown=None,
              hooks_class_name=None,
              _app=None,
              in_xg_transaction=False):
  """Start a new, mapper-only mapreduce.

  Deprecated! Use map_job.start instead.

  If a value can be specified both from an explicit argument and from
  a dictionary, the value from the explicit argument wins.

  Args:
    name: mapreduce name. Used only for display purposes.
    handler_spec: fully qualified name of mapper handler function/class to
      call.
    reader_spec: fully qualified name of mapper reader to use
    mapper_parameters: dictionary of parameters to pass to mapper. These are
      mapper-specific and also used for reader/writer initialization.
      Should have format {"input_reader": {}, "output_writer":{}}. Old
      deprecated style does not have sub dictionaries.
    shard_count: number of shards to create.
    mapreduce_parameters: dictionary of mapreduce parameters relevant to the
      whole job.
    base_path: base path of mapreduce library handler specified in app.yaml.
      "/mapreduce" by default.
    queue_name: taskqueue queue name to be used for mapreduce tasks.
      see util.get_queue_name.
    eta: absolute time when the MR should execute. May not be specified
      if 'countdown' is also supplied. This may be timezone-aware or
      timezone-naive.
    countdown: time in seconds into the future that this MR should execute.
      Defaults to zero.
    hooks_class_name: fully qualified name of a hooks.Hooks subclass.
    in_xg_transaction: controls what transaction scope to use to start this MR
      job. If True, there has to be an already opened cross-group transaction
      scope. MR will use one entity group from it.
      If False, MR will create an independent transaction to start the job
      regardless of any existing transaction scopes.

  Returns:
    mapreduce id as string.
  """
  if shard_count is None:
    shard_count = parameters.config.SHARD_COUNT
  if mapper_parameters:
    mapper_parameters = dict(mapper_parameters)

  # Make sure this old API fill all parameters with default values.
  mr_params = map_job.JobConfig._get_default_mr_params()
  if mapreduce_parameters:
    mr_params.update(mapreduce_parameters)

  # Override default values if user specified them as arguments.
  if base_path:
    mr_params["base_path"] = base_path
  mr_params["queue_name"] = util.get_queue_name(queue_name)

  mapper_spec = model.MapperSpec(handler_spec,
                                 reader_spec,
                                 mapper_parameters,
                                 shard_count,
                                 output_writer_spec=output_writer_spec)

  if in_xg_transaction and not db.is_in_transaction():
    logging.warning("Expects an opened xg transaction to start mapreduce "
                    "when transactional is True.")

  return handlers.StartJobHandler._start_map(
      name,
      mapper_spec,
      mr_params,
      # TODO(user): Now that "queue_name" is part of mr_params.
      # Remove all the other ways to get queue_name after one release.
      queue_name=mr_params["queue_name"],
      eta=eta,
      countdown=countdown,
      hooks_class_name=hooks_class_name,
      _app=_app,
      in_xg_transaction=in_xg_transaction)
python
def start_map(name,
              handler_spec,
              reader_spec,
              mapper_parameters,
              shard_count=None,
              output_writer_spec=None,
              mapreduce_parameters=None,
              base_path=None,
              queue_name=None,
              eta=None,
              countdown=None,
              hooks_class_name=None,
              _app=None,
              in_xg_transaction=False):
  """Start a new, mapper-only mapreduce.

  Deprecated! Use map_job.start instead.

  If a value can be specified both from an explicit argument and from
  a dictionary, the value from the explicit argument wins.

  Args:
    name: mapreduce name. Used only for display purposes.
    handler_spec: fully qualified name of mapper handler function/class to
      call.
    reader_spec: fully qualified name of mapper reader to use
    mapper_parameters: dictionary of parameters to pass to mapper. These are
      mapper-specific and also used for reader/writer initialization.
      Should have format {"input_reader": {}, "output_writer":{}}. Old
      deprecated style does not have sub dictionaries.
    shard_count: number of shards to create.
    mapreduce_parameters: dictionary of mapreduce parameters relevant to the
      whole job.
    base_path: base path of mapreduce library handler specified in app.yaml.
      "/mapreduce" by default.
    queue_name: taskqueue queue name to be used for mapreduce tasks.
      see util.get_queue_name.
    eta: absolute time when the MR should execute. May not be specified
      if 'countdown' is also supplied. This may be timezone-aware or
      timezone-naive.
    countdown: time in seconds into the future that this MR should execute.
      Defaults to zero.
    hooks_class_name: fully qualified name of a hooks.Hooks subclass.
    in_xg_transaction: controls what transaction scope to use to start this MR
      job. If True, there has to be an already opened cross-group transaction
      scope. MR will use one entity group from it.
      If False, MR will create an independent transaction to start the job
      regardless of any existing transaction scopes.

  Returns:
    mapreduce id as string.
  """
  if shard_count is None:
    shard_count = parameters.config.SHARD_COUNT
  if mapper_parameters:
    mapper_parameters = dict(mapper_parameters)

  # Make sure this old API fill all parameters with default values.
  mr_params = map_job.JobConfig._get_default_mr_params()
  if mapreduce_parameters:
    mr_params.update(mapreduce_parameters)

  # Override default values if user specified them as arguments.
  if base_path:
    mr_params["base_path"] = base_path
  mr_params["queue_name"] = util.get_queue_name(queue_name)

  mapper_spec = model.MapperSpec(handler_spec,
                                 reader_spec,
                                 mapper_parameters,
                                 shard_count,
                                 output_writer_spec=output_writer_spec)

  if in_xg_transaction and not db.is_in_transaction():
    logging.warning("Expects an opened xg transaction to start mapreduce "
                    "when transactional is True.")

  return handlers.StartJobHandler._start_map(
      name,
      mapper_spec,
      mr_params,
      # TODO(user): Now that "queue_name" is part of mr_params.
      # Remove all the other ways to get queue_name after one release.
      queue_name=mr_params["queue_name"],
      eta=eta,
      countdown=countdown,
      hooks_class_name=hooks_class_name,
      _app=_app,
      in_xg_transaction=in_xg_transaction)
Start a new, mapper-only mapreduce. Deprecated! Use map_job.start instead. If a value can be specified both from an explicit argument and from a dictionary, the value from the explicit argument wins. Args: name: mapreduce name. Used only for display purposes. handler_spec: fully qualified name of mapper handler function/class to call. reader_spec: fully qualified name of mapper reader to use mapper_parameters: dictionary of parameters to pass to mapper. These are mapper-specific and also used for reader/writer initialization. Should have format {"input_reader": {}, "output_writer":{}}. Old deprecated style does not have sub dictionaries. shard_count: number of shards to create. mapreduce_parameters: dictionary of mapreduce parameters relevant to the whole job. base_path: base path of mapreduce library handler specified in app.yaml. "/mapreduce" by default. queue_name: taskqueue queue name to be used for mapreduce tasks. see util.get_queue_name. eta: absolute time when the MR should execute. May not be specified if 'countdown' is also supplied. This may be timezone-aware or timezone-naive. countdown: time in seconds into the future that this MR should execute. Defaults to zero. hooks_class_name: fully qualified name of a hooks.Hooks subclass. in_xg_transaction: controls what transaction scope to use to start this MR job. If True, there has to be an already opened cross-group transaction scope. MR will use one entity group from it. If False, MR will create an independent transaction to start the job regardless of any existing transaction scopes. Returns: mapreduce id as string.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/control.py#L37-L125
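A minimal call of the deprecated start_map API above, using only arguments from its signature; the handler spec and entity kind are placeholders for application code.
from mapreduce import control

mapreduce_id = control.start_map(
    name="word count",
    handler_spec="main.word_count_map",        # placeholder mapper function
    reader_spec="mapreduce.input_readers.DatastoreInputReader",
    mapper_parameters={
        "input_reader": {"entity_kind": "main.Document"},  # placeholder kind
        "output_writer": {},
    },
    shard_count=8)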
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/api/map_job/map_job_control.py
Job.get_status
def get_status(self):
  """Get status enum.

  Returns:
    One of the status enum.
  """
  self.__update_state()
  if self._state.active:
    return self.RUNNING
  else:
    return self._state.result_status
python
def get_status(self):
  """Get status enum.

  Returns:
    One of the status enum.
  """
  self.__update_state()
  if self._state.active:
    return self.RUNNING
  else:
    return self._state.result_status
Get status enum. Returns: One of the status enum.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/map_job_control.py#L70-L80
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/api/map_job/map_job_control.py
Job.get_counter
def get_counter(self, counter_name, default=0):
  """Get the value of the named counter from this job.

  When a job is running, counter values won't be very accurate.

  Args:
    counter_name: name of the counter in string.
    default: default value if the counter doesn't exist.

  Returns:
    Value in int of the named counter.
  """
  self.__update_state()
  return self._state.counters_map.get(counter_name, default)
python
def get_counter(self, counter_name, default=0):
  """Get the value of the named counter from this job.

  When a job is running, counter values won't be very accurate.

  Args:
    counter_name: name of the counter in string.
    default: default value if the counter doesn't exist.

  Returns:
    Value in int of the named counter.
  """
  self.__update_state()
  return self._state.counters_map.get(counter_name, default)
Get the value of the named counter from this job. When a job is running, counter values won't be very accurate. Args: counter_name: name of the counter in string. default: default value if the counter doesn't exist. Returns: Value in int of the named counter.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/map_job_control.py#L98-L111
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/api/map_job/map_job_control.py
Job.get_outputs
def get_outputs(self):
  """Get outputs of this job.

  Should only call if status is SUCCESS.

  Yields:
    Iterators, one for each shard. Each iterator is
    from the argument of map_job.output_writer.commit_output.
  """
  assert self.SUCCESS == self.get_status()
  ss = model.ShardState.find_all_by_mapreduce_state(self._state)
  for s in ss:
    yield iter(s.writer_state.get("outs", []))
python
def get_outputs(self):
  """Get outputs of this job.

  Should only call if status is SUCCESS.

  Yields:
    Iterators, one for each shard. Each iterator is
    from the argument of map_job.output_writer.commit_output.
  """
  assert self.SUCCESS == self.get_status()
  ss = model.ShardState.find_all_by_mapreduce_state(self._state)
  for s in ss:
    yield iter(s.writer_state.get("outs", []))
Get outputs of this job. Should only call if status is SUCCESS. Yields: Iterators, one for each shard. Each iterator is from the argument of map_job.output_writer.commit_output.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/map_job_control.py#L113-L125
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/api/map_job/map_job_control.py
Job.submit
def submit(cls, job_config, in_xg_transaction=False):
  """Submit the job to run.

  Args:
    job_config: an instance of map_job.MapJobConfig.
    in_xg_transaction: controls what transaction scope to use to start this MR
      job. If True, there has to be an already opened cross-group transaction
      scope. MR will use one entity group from it.
      If False, MR will create an independent transaction to start the job
      regardless of any existing transaction scopes.

  Returns:
    a Job instance representing the submitted job.
  """
  cls.__validate_job_config(job_config)
  mapper_spec = job_config._get_mapper_spec()

  # Create mr spec.
  mapreduce_params = job_config._get_mr_params()
  mapreduce_spec = model.MapreduceSpec(
      job_config.job_name,
      job_config.job_id,
      mapper_spec.to_json(),
      mapreduce_params,
      util._obj_to_path(job_config._hooks_cls))

  # Save states and enqueue task.
  if in_xg_transaction:
    propagation = db.MANDATORY
  else:
    propagation = db.INDEPENDENT

  state = None
  @db.transactional(propagation=propagation)
  def _txn():
    state = cls.__create_and_save_state(job_config, mapreduce_spec)
    cls.__add_kickoff_task(job_config, mapreduce_spec)
    return state
  state = _txn()

  return cls(state)
python
def submit(cls, job_config, in_xg_transaction=False):
  """Submit the job to run.

  Args:
    job_config: an instance of map_job.MapJobConfig.
    in_xg_transaction: controls what transaction scope to use to start this MR
      job. If True, there has to be an already opened cross-group transaction
      scope. MR will use one entity group from it.
      If False, MR will create an independent transaction to start the job
      regardless of any existing transaction scopes.

  Returns:
    a Job instance representing the submitted job.
  """
  cls.__validate_job_config(job_config)
  mapper_spec = job_config._get_mapper_spec()

  # Create mr spec.
  mapreduce_params = job_config._get_mr_params()
  mapreduce_spec = model.MapreduceSpec(
      job_config.job_name,
      job_config.job_id,
      mapper_spec.to_json(),
      mapreduce_params,
      util._obj_to_path(job_config._hooks_cls))

  # Save states and enqueue task.
  if in_xg_transaction:
    propagation = db.MANDATORY
  else:
    propagation = db.INDEPENDENT

  state = None
  @db.transactional(propagation=propagation)
  def _txn():
    state = cls.__create_and_save_state(job_config, mapreduce_spec)
    cls.__add_kickoff_task(job_config, mapreduce_spec)
    return state
  state = _txn()

  return cls(state)
Submit the job to run. Args: job_config: an instance of map_job.MapJobConfig. in_xg_transaction: controls what transaction scope to use to start this MR job. If True, there has to be an already opened cross-group transaction scope. MR will use one entity group from it. If False, MR will create an independent transaction to start the job regardless of any existing transaction scopes. Returns: a Job instance representing the submitted job.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/map_job_control.py#L128-L168
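A sketch of submitting through the newer API that Job.submit implements. The exact import path and JobConfig keyword names are assumptions pieced together from _to_map_job_config and _get_mapper_spec above; MyMapper and MyInputReader stand for application-defined classes and are hypothetical.
from mapreduce.api import map_job

config = map_job.JobConfig(               # keyword names assumed from above
    job_name="rebuild index",
    mapper=MyMapper,                      # hypothetical map_job.Mapper subclass
    input_reader_cls=MyInputReader,       # hypothetical map_job input reader
    input_reader_params={"entity_kind": "main.Document"},
    shard_count=8)
job = map_job.Job.submit(config)
status = job.get_status()                 # RUNNING until the job terminates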
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/api/map_job/map_job_control.py
Job.__update_state
def __update_state(self):
  """Fetches most up to date state from db."""
  # Only if the job was not in a terminal state.
  if self._state.active:
    self._state = self.__get_state_by_id(self.job_config.job_id)
python
def __update_state(self):
  """Fetches most up to date state from db."""
  # Only if the job was not in a terminal state.
  if self._state.active:
    self._state = self.__get_state_by_id(self.job_config.job_id)
Fetches most up to date state from db.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/map_job_control.py#L170-L174
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/api/map_job/map_job_control.py
Job.__get_state_by_id
def __get_state_by_id(cls, job_id):
  """Get job state by id.

  Args:
    job_id: job id.

  Returns:
    model.MapreduceState for the job.

  Raises:
    ValueError: if the job state is missing.
  """
  state = model.MapreduceState.get_by_job_id(job_id)
  if state is None:
    raise ValueError("Job state for job %s is missing." % job_id)
  return state
python
def __get_state_by_id(cls, job_id):
  """Get job state by id.

  Args:
    job_id: job id.

  Returns:
    model.MapreduceState for the job.

  Raises:
    ValueError: if the job state is missing.
  """
  state = model.MapreduceState.get_by_job_id(job_id)
  if state is None:
    raise ValueError("Job state for job %s is missing." % job_id)
  return state
Get job state by id. Args: job_id: job id. Returns: model.MapreduceState for the job. Raises: ValueError: if the job state is missing.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/map_job_control.py#L177-L192
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/api/map_job/map_job_control.py
Job.__create_and_save_state
def __create_and_save_state(cls, job_config, mapreduce_spec):
  """Save map job state to datastore.

  Save state to datastore so that UI can see it immediately.

  Args:
    job_config: map_job.JobConfig.
    mapreduce_spec: model.MapreduceSpec.

  Returns:
    model.MapreduceState for this job.
  """
  state = model.MapreduceState.create_new(job_config.job_id)
  state.mapreduce_spec = mapreduce_spec
  state.active = True
  state.active_shards = 0
  state.app_id = job_config._app
  config = datastore_rpc.Configuration(force_writes=job_config._force_writes)
  state.put(config=config)
  return state
python
def __create_and_save_state(cls, job_config, mapreduce_spec):
  """Save map job state to datastore.

  Save state to datastore so that UI can see it immediately.

  Args:
    job_config: map_job.JobConfig.
    mapreduce_spec: model.MapreduceSpec.

  Returns:
    model.MapreduceState for this job.
  """
  state = model.MapreduceState.create_new(job_config.job_id)
  state.mapreduce_spec = mapreduce_spec
  state.active = True
  state.active_shards = 0
  state.app_id = job_config._app
  config = datastore_rpc.Configuration(force_writes=job_config._force_writes)
  state.put(config=config)
  return state
Save map job state to datastore. Save state to datastore so that UI can see it immediately. Args: job_config: map_job.JobConfig. mapreduce_spec: model.MapreduceSpec. Returns: model.MapreduceState for this job.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/map_job_control.py#L202-L221
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/api/map_job/map_job_control.py
Job.__add_kickoff_task
def __add_kickoff_task(cls, job_config, mapreduce_spec):
  """Add kickoff task to taskqueue.

  Args:
    job_config: map_job.JobConfig.
    mapreduce_spec: model.MapreduceSpec,
  """
  params = {"mapreduce_id": job_config.job_id}
  # Task is not named so that it can be added within a transaction.
  kickoff_task = taskqueue.Task(
      # TODO(user): Perhaps make this url a computed field of job_config.
      url=job_config._base_path + "/kickoffjob_callback/" + job_config.job_id,
      headers=util._get_task_headers(job_config.job_id),
      params=params)
  if job_config._hooks_cls:
    hooks = job_config._hooks_cls(mapreduce_spec)
    try:
      hooks.enqueue_kickoff_task(kickoff_task, job_config.queue_name)
      return
    except NotImplementedError:
      pass
  kickoff_task.add(job_config.queue_name, transactional=True)
python
def __add_kickoff_task(cls, job_config, mapreduce_spec):
  """Add kickoff task to taskqueue.

  Args:
    job_config: map_job.JobConfig.
    mapreduce_spec: model.MapreduceSpec,
  """
  params = {"mapreduce_id": job_config.job_id}
  # Task is not named so that it can be added within a transaction.
  kickoff_task = taskqueue.Task(
      # TODO(user): Perhaps make this url a computed field of job_config.
      url=job_config._base_path + "/kickoffjob_callback/" + job_config.job_id,
      headers=util._get_task_headers(job_config.job_id),
      params=params)
  if job_config._hooks_cls:
    hooks = job_config._hooks_cls(mapreduce_spec)
    try:
      hooks.enqueue_kickoff_task(kickoff_task, job_config.queue_name)
      return
    except NotImplementedError:
      pass
  kickoff_task.add(job_config.queue_name, transactional=True)
Add kickoff task to taskqueue. Args: job_config: map_job.JobConfig. mapreduce_spec: model.MapreduceSpec,
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/map_job_control.py#L224-L245
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/json_util.py
JsonMixin.to_json_str
def to_json_str(self):
  """Convert data to json string representation.

  Returns:
    json representation as string.
  """
  _json = self.to_json()
  try:
    return json.dumps(_json, sort_keys=True, cls=JsonEncoder)
  except:
    logging.exception("Could not serialize JSON: %r", _json)
    raise
python
def to_json_str(self):
  """Convert data to json string representation.

  Returns:
    json representation as string.
  """
  _json = self.to_json()
  try:
    return json.dumps(_json, sort_keys=True, cls=JsonEncoder)
  except:
    logging.exception("Could not serialize JSON: %r", _json)
    raise
Convert data to json string representation. Returns: json representation as string.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/json_util.py#L135-L146
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/json_util.py
JsonMixin.from_json_str
def from_json_str(cls, json_str):
  """Convert json string representation into class instance.

  Args:
    json_str: json representation as string.

  Returns:
    New instance of the class with data loaded from json string.
  """
  return cls.from_json(json.loads(json_str, cls=JsonDecoder))
python
def from_json_str(cls, json_str):
  """Convert json string representation into class instance.

  Args:
    json_str: json representation as string.

  Returns:
    New instance of the class with data loaded from json string.
  """
  return cls.from_json(json.loads(json_str, cls=JsonDecoder))
Convert json string representation into class instance. Args: json_str: json representation as string. Returns: New instance of the class with data loaded from json string.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/json_util.py#L149-L158
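A round-trip sketch for the JsonMixin pair above: a class supplies to_json()/from_json() and inherits the string variants. Only the mixin methods shown in this listing are relied on; the Point class is illustrative.
from mapreduce import json_util


class Point(json_util.JsonMixin):
  def __init__(self, x, y):
    self.x = x
    self.y = y

  def to_json(self):
    return {"x": self.x, "y": self.y}

  @classmethod
  def from_json(cls, json_dict):
    return cls(json_dict["x"], json_dict["y"])


restored = Point.from_json_str(Point(1, 2).to_json_str())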
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/json_util.py
JsonProperty.get_value_for_datastore
def get_value_for_datastore(self, model_instance):
  """Gets value for datastore.

  Args:
    model_instance: instance of the model class.

  Returns:
    datastore-compatible value.
  """
  value = super(JsonProperty, self).get_value_for_datastore(model_instance)
  if not value:
    return None
  json_value = value
  if not isinstance(value, dict):
    json_value = value.to_json()
  if not json_value:
    return None
  return datastore_types.Text(json.dumps(
      json_value, sort_keys=True, cls=JsonEncoder))
python
def get_value_for_datastore(self, model_instance):
  """Gets value for datastore.

  Args:
    model_instance: instance of the model class.

  Returns:
    datastore-compatible value.
  """
  value = super(JsonProperty, self).get_value_for_datastore(model_instance)
  if not value:
    return None
  json_value = value
  if not isinstance(value, dict):
    json_value = value.to_json()
  if not json_value:
    return None
  return datastore_types.Text(json.dumps(
      json_value, sort_keys=True, cls=JsonEncoder))
Gets value for datastore. Args: model_instance: instance of the model class. Returns: datastore-compatible value.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/json_util.py#L183-L201
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/json_util.py
JsonProperty.make_value_from_datastore
def make_value_from_datastore(self, value):
  """Convert value from datastore representation.

  Args:
    value: datastore value.

  Returns:
    value to store in the model.
  """
  if value is None:
    return None
  _json = json.loads(value, cls=JsonDecoder)
  if self.data_type == dict:
    return _json
  return self.data_type.from_json(_json)
python
def make_value_from_datastore(self, value):
  """Convert value from datastore representation.

  Args:
    value: datastore value.

  Returns:
    value to store in the model.
  """
  if value is None:
    return None
  _json = json.loads(value, cls=JsonDecoder)
  if self.data_type == dict:
    return _json
  return self.data_type.from_json(_json)
Convert value from datastore representation. Args: value: datastore value. Returns: value to store in the model.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/json_util.py#L203-L218
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/json_util.py
JsonProperty.validate
def validate(self, value):
  """Validate value.

  Args:
    value: model value.

  Returns:
    Whether the specified value is valid data type value.

  Raises:
    BadValueError: when value is not of self.data_type type.
  """
  if value is not None and not isinstance(value, self.data_type):
    raise datastore_errors.BadValueError(
        "Property %s must be convertible to a %s instance (%s)" %
        (self.name, self.data_type, value))
  return super(JsonProperty, self).validate(value)
python
def validate(self, value):
  """Validate value.

  Args:
    value: model value.

  Returns:
    Whether the specified value is valid data type value.

  Raises:
    BadValueError: when value is not of self.data_type type.
  """
  if value is not None and not isinstance(value, self.data_type):
    raise datastore_errors.BadValueError(
        "Property %s must be convertible to a %s instance (%s)" %
        (self.name, self.data_type, value))
  return super(JsonProperty, self).validate(value)
Validate value. Args: value: model value. Returns: Whether the specified value is valid data type value. Raises: BadValueError: when value is not of self.data_type type.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/json_util.py#L220-L236
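A declaration sketch for JsonProperty based on the three methods above: with data_type=dict the property stores plain dicts, while any other data_type must expose to_json()/from_json(). The constructor arguments are assumed, not shown in this excerpt.
from google.appengine.ext import db

from mapreduce import json_util


class JobRecord(db.Model):
  # Stored as a Text blob of sorted-key JSON; returned as a dict on read.
  payload = json_util.JsonProperty(dict, default=None)  # assumed signature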
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
HugeTask.add
def add(self, queue_name, transactional=False):
  """Add task to the queue."""
  task = self.to_task()
  task.add(queue_name, transactional)
python
def add(self, queue_name, transactional=False):
  """Add task to the queue."""
  task = self.to_task()
  task.add(queue_name, transactional)
Add task to the queue.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L161-L164
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
HugeTask.to_task
def to_task(self):
  """Convert to a taskqueue task."""
  # Never pass params to taskqueue.Task. Use payload instead. Otherwise,
  # it's up to a particular taskqueue implementation to generate
  # payload from params. It could blow up payload size over limit.
  return taskqueue.Task(
      url=self.url,
      payload=self._payload,
      name=self.name,
      eta=self.eta,
      countdown=self.countdown,
      headers=self._headers)
python
def to_task(self):
  """Convert to a taskqueue task."""
  # Never pass params to taskqueue.Task. Use payload instead. Otherwise,
  # it's up to a particular taskqueue implementation to generate
  # payload from params. It could blow up payload size over limit.
  return taskqueue.Task(
      url=self.url,
      payload=self._payload,
      name=self.name,
      eta=self.eta,
      countdown=self.countdown,
      headers=self._headers)
Convert to a taskqueue task.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L166-L177
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
HugeTask.decode_payload
def decode_payload(cls, request):
  """Decode task payload.

  HugeTask controls its own payload entirely including urlencoding.
  It doesn't depend on any particular web framework.

  Args:
    request: a webapp Request instance.

  Returns:
    A dict of str to str. The same as the params argument to __init__.

  Raises:
    DeprecationWarning: When task payload constructed from an older
      incompatible version of mapreduce.
  """
  # TODO(user): Pass mr_id into headers. Otherwise when payload decoding
  # failed, we can't abort a mr.
  if request.headers.get(cls.PAYLOAD_VERSION_HEADER) != cls.PAYLOAD_VERSION:
    raise DeprecationWarning(
        "Task is generated by an older incompatible version of mapreduce. "
        "Please kill this job manually")
  return cls._decode_payload(request.body)
python
def decode_payload(cls, request):
  """Decode task payload.

  HugeTask controls its own payload entirely including urlencoding.
  It doesn't depend on any particular web framework.

  Args:
    request: a webapp Request instance.

  Returns:
    A dict of str to str. The same as the params argument to __init__.

  Raises:
    DeprecationWarning: When task payload constructed from an older
      incompatible version of mapreduce.
  """
  # TODO(user): Pass mr_id into headers. Otherwise when payload decoding
  # failed, we can't abort a mr.
  if request.headers.get(cls.PAYLOAD_VERSION_HEADER) != cls.PAYLOAD_VERSION:
    raise DeprecationWarning(
        "Task is generated by an older incompatible version of mapreduce. "
        "Please kill this job manually")
  return cls._decode_payload(request.body)
Decode task payload. HugeTask controls its own payload entirely including urlencoding. It doesn't depend on any particular web framework. Args: request: a webapp Request instance. Returns: A dict of str to str. The same as the params argument to __init__. Raises: DeprecationWarning: When task payload constructed from an older incompatible version of mapreduce.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L180-L202
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
CountersMap.increment
def increment(self, counter_name, delta):
  """Increment counter value.

  Args:
    counter_name: counter name as String.
    delta: increment delta as Integer.

  Returns:
    new counter value.
  """
  current_value = self.counters.get(counter_name, 0)
  new_value = current_value + delta
  self.counters[counter_name] = new_value
  return new_value
python
def increment(self, counter_name, delta):
  """Increment counter value.

  Args:
    counter_name: counter name as String.
    delta: increment delta as Integer.

  Returns:
    new counter value.
  """
  current_value = self.counters.get(counter_name, 0)
  new_value = current_value + delta
  self.counters[counter_name] = new_value
  return new_value
Increment counter value. Args: counter_name: counter name as String. delta: increment delta as Integer. Returns: new counter value.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L263-L276
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
CountersMap.add_map
def add_map(self, counters_map):
  """Add all counters from the map.

  For each counter in the passed map, adds its value to the counter in
  this map.

  Args:
    counters_map: CounterMap instance to add.
  """
  for counter_name in counters_map.counters:
    self.increment(counter_name, counters_map.counters[counter_name])
python
def add_map(self, counters_map):
  """Add all counters from the map.

  For each counter in the passed map, adds its value to the counter in
  this map.

  Args:
    counters_map: CounterMap instance to add.
  """
  for counter_name in counters_map.counters:
    self.increment(counter_name, counters_map.counters[counter_name])
Add all counters from the map. For each counter in the passed map, adds its value to the counter in this map. Args: counters_map: CounterMap instance to add.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L278-L288
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
CountersMap.sub_map
def sub_map(self, counters_map):
  """Subtracts all counters from the map.

  For each counter in the passed map, subtracts its value to the counter in
  this map.

  Args:
    counters_map: CounterMap instance to subtract.
  """
  for counter_name in counters_map.counters:
    self.increment(counter_name, -counters_map.counters[counter_name])
python
def sub_map(self, counters_map):
  """Subtracts all counters from the map.

  For each counter in the passed map, subtracts its value to the counter in
  this map.

  Args:
    counters_map: CounterMap instance to subtract.
  """
  for counter_name in counters_map.counters:
    self.increment(counter_name, -counters_map.counters[counter_name])
Subtracts all counters from the map. For each counter in the passed map, subtracts its value to the counter in this map. Args: counters_map: CounterMap instance to subtract.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L290-L300
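A minimal usage sketch of the three CountersMap aggregation methods above (increment, add_map, sub_map); it assumes the appengine-mapreduce package is importable as mapreduce.model.

from mapreduce.model import CountersMap

shard_counters = CountersMap()
shard_counters.increment("entities_read", 100)
shard_counters.increment("entities_skipped", 3)

job_counters = CountersMap()
job_counters.add_map(shard_counters)   # fold a shard's counters into the job totals
job_counters.sub_map(shard_counters)   # remove the same contribution again
assert job_counters.counters["entities_read"] == 0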
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
MapperSpec.to_json
def to_json(self):
  """Serializes this MapperSpec into a json-izable object."""
  result = {
      "mapper_handler_spec": self.handler_spec,
      "mapper_input_reader": self.input_reader_spec,
      "mapper_params": self.params,
      "mapper_shard_count": self.shard_count
  }
  if self.output_writer_spec:
    result["mapper_output_writer"] = self.output_writer_spec
  return result
python
def to_json(self):
  """Serializes this MapperSpec into a json-izable object."""
  result = {
      "mapper_handler_spec": self.handler_spec,
      "mapper_input_reader": self.input_reader_spec,
      "mapper_params": self.params,
      "mapper_shard_count": self.shard_count
  }
  if self.output_writer_spec:
    result["mapper_output_writer"] = self.output_writer_spec
  return result
Serializes this MapperSpec into a json-izable object.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L412-L422
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
MapreduceSpec.get_hooks
def get_hooks(self):
  """Returns a hooks.Hooks class or None if no hooks class has been set."""
  if self.__hooks is None and self.hooks_class_name is not None:
    hooks_class = util.for_name(self.hooks_class_name)
    if not isinstance(hooks_class, type):
      raise ValueError("hooks_class_name must refer to a class, got %s" %
                       type(hooks_class).__name__)
    if not issubclass(hooks_class, hooks.Hooks):
      raise ValueError(
          "hooks_class_name must refer to a hooks.Hooks subclass")
    self.__hooks = hooks_class(self)
  return self.__hooks
python
def get_hooks(self):
  """Returns a hooks.Hooks class or None if no hooks class has been set."""
  if self.__hooks is None and self.hooks_class_name is not None:
    hooks_class = util.for_name(self.hooks_class_name)
    if not isinstance(hooks_class, type):
      raise ValueError("hooks_class_name must refer to a class, got %s" %
                       type(hooks_class).__name__)
    if not issubclass(hooks_class, hooks.Hooks):
      raise ValueError(
          "hooks_class_name must refer to a hooks.Hooks subclass")
    self.__hooks = hooks_class(self)
  return self.__hooks
Returns a hooks.Hooks class or None if no hooks class has been set.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L488-L500
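A stand-alone sketch of the resolve-and-validate pattern get_hooks applies to hooks_class_name: import the dotted name, check it really is a subclass of the expected base, then instantiate it. The importlib-based lookup and the local Hooks base class are stand-ins for util.for_name and hooks.Hooks.

import importlib

class Hooks(object):                     # stands in for hooks.Hooks
  def __init__(self, mapreduce_spec):
    self.mapreduce_spec = mapreduce_spec

def resolve_hooks(hooks_class_name, spec):
  # util.for_name equivalent: import the module, then fetch the attribute.
  module_name, _, class_name = hooks_class_name.rpartition(".")
  hooks_class = getattr(importlib.import_module(module_name), class_name)
  if not isinstance(hooks_class, type) or not issubclass(hooks_class, Hooks):
    raise ValueError("hooks_class_name must refer to a Hooks subclass")
  return hooks_class(spec)               # instantiated once; the caller caches it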
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
MapreduceSpec.to_json
def to_json(self):
  """Serializes all data in this mapreduce spec into json form.

  Returns:
    data in json format.
  """
  mapper_spec = self.mapper.to_json()
  return {
      "name": self.name,
      "mapreduce_id": self.mapreduce_id,
      "mapper_spec": mapper_spec,
      "params": self.params,
      "hooks_class_name": self.hooks_class_name,
  }
python
def to_json(self):
  """Serializes all data in this mapreduce spec into json form.

  Returns:
    data in json format.
  """
  mapper_spec = self.mapper.to_json()
  return {
      "name": self.name,
      "mapreduce_id": self.mapreduce_id,
      "mapper_spec": mapper_spec,
      "params": self.params,
      "hooks_class_name": self.hooks_class_name,
  }
Serializes all data in this mapreduce spec into json form. Returns: data in json format.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L502-L515
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
MapreduceSpec.from_json
def from_json(cls, json):
  """Create new MapreduceSpec from the json, encoded by to_json.

  Args:
    json: json representation of MapreduceSpec.

  Returns:
    an instance of MapreduceSpec with all data deserialized from json.
  """
  mapreduce_spec = cls(json["name"],
                       json["mapreduce_id"],
                       json["mapper_spec"],
                       json.get("params"),
                       json.get("hooks_class_name"))
  return mapreduce_spec
python
def from_json(cls, json):
  """Create new MapreduceSpec from the json, encoded by to_json.

  Args:
    json: json representation of MapreduceSpec.

  Returns:
    an instance of MapreduceSpec with all data deserialized from json.
  """
  mapreduce_spec = cls(json["name"],
                       json["mapreduce_id"],
                       json["mapper_spec"],
                       json.get("params"),
                       json.get("hooks_class_name"))
  return mapreduce_spec
Create new MapreduceSpec from the json, encoded by to_json. Args: json: json representation of MapreduceSpec. Returns: an instance of MapreduceSpec with all data deserialized from json.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L518-L532
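A hedged round-trip sketch for the to_json/from_json pair above (and the MapperSpec.to_json shape it embeds). It assumes the appengine-mapreduce package is importable; the job name, id, and mapper fields are made-up sample values.

from mapreduce.model import MapreduceSpec

original = {
    "name": "WordCount",
    "mapreduce_id": "1234567890",
    "mapper_spec": {
        "mapper_handler_spec": "wordcount.map",
        "mapper_input_reader": "mapreduce.input_readers.DatastoreInputReader",
        "mapper_params": {"entity_kind": "Document"},
        "mapper_shard_count": 8,
    },
    "params": {"base_path": "/mapreduce"},
    "hooks_class_name": None,
}
spec = MapreduceSpec.from_json(original)
round_tripped = spec.to_json()   # carries the same name/id/mapper/params/hooks fields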
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
MapreduceSpec._get_mapreduce_spec
def _get_mapreduce_spec(cls, mr_id):
  """Get Mapreduce spec from mr id."""
  key = 'GAE-MR-spec: %s' % mr_id
  spec_json = memcache.get(key)
  if spec_json:
    return cls.from_json(spec_json)
  state = MapreduceState.get_by_job_id(mr_id)
  spec = state.mapreduce_spec
  spec_json = spec.to_json()
  memcache.set(key, spec_json)
  return spec
python
def _get_mapreduce_spec(cls, mr_id):
  """Get Mapreduce spec from mr id."""
  key = 'GAE-MR-spec: %s' % mr_id
  spec_json = memcache.get(key)
  if spec_json:
    return cls.from_json(spec_json)
  state = MapreduceState.get_by_job_id(mr_id)
  spec = state.mapreduce_spec
  spec_json = spec.to_json()
  memcache.set(key, spec_json)
  return spec
Get Mapreduce spec from mr id.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L543-L553
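A sketch of the cache-aside lookup _get_mapreduce_spec performs, with a plain dict standing in for App Engine memcache. It assumes MapreduceSpec and MapreduceState from mapreduce.model are importable and that a state entity for the given id already exists in the datastore.

from mapreduce.model import MapreduceSpec, MapreduceState

_spec_cache = {}   # stands in for memcache

def get_mapreduce_spec(mr_id):
  key = "GAE-MR-spec: %s" % mr_id
  spec_json = _spec_cache.get(key)
  if spec_json:
    return MapreduceSpec.from_json(spec_json)     # cheap path: no datastore read
  state = MapreduceState.get_by_job_id(mr_id)     # slow path: hit the datastore
  _spec_cache[key] = state.mapreduce_spec.to_json()
  return state.mapreduce_spec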
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
MapreduceState.get_key_by_job_id
def get_key_by_job_id(cls, mapreduce_id):
  """Retrieves the Key for a Job.

  Args:
    mapreduce_id: The job to retrieve.

  Returns:
    Datastore Key that can be used to fetch the MapreduceState.
  """
  return db.Key.from_path(cls.kind(), str(mapreduce_id))
python
def get_key_by_job_id(cls, mapreduce_id):
  """Retrieves the Key for a Job.

  Args:
    mapreduce_id: The job to retrieve.

  Returns:
    Datastore Key that can be used to fetch the MapreduceState.
  """
  return db.Key.from_path(cls.kind(), str(mapreduce_id))
Retrieves the Key for a Job. Args: mapreduce_id: The job to retrieve. Returns: Datastore Key that can be used to fetch the MapreduceState.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L614-L623
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
MapreduceState.set_processed_counts
def set_processed_counts(self, shards_processed, shards_status):
  """Updates a chart url to display processed count for each shard.

  Args:
    shards_processed: list of integers with number of processed entities in
      each shard.
    shards_status: list of status strings for each shard, parallel to
      shards_processed.
  """
  chart = google_chart_api.BarChart()

  def filter_status(status_to_filter):
    return [count if status == status_to_filter else 0
            for count, status in zip(shards_processed, shards_status)]

  if shards_status:
    # Each index will have only one non-zero count, so stack them to color-
    # code the bars by status.
    # These status values are computed in _update_state_from_shard_states,
    # in mapreduce/handlers.py.
    chart.stacked = True
    chart.AddBars(filter_status("unknown"), color="404040")
    chart.AddBars(filter_status("success"), color="00ac42")
    chart.AddBars(filter_status("running"), color="3636a9")
    chart.AddBars(filter_status("aborted"), color="e29e24")
    chart.AddBars(filter_status("failed"), color="f6350f")
  else:
    chart.AddBars(shards_processed)

  shard_count = len(shards_processed)

  if shard_count > 95:
    # Auto-spacing does not work for large numbers of shards.
    pixels_per_shard = 700.0 / shard_count
    bar_thickness = int(pixels_per_shard * .9)
    chart.style = bar_chart.BarChartStyle(bar_thickness=bar_thickness,
                                          bar_gap=0.1,
                                          use_fractional_gap_spacing=True)

  if shards_processed and shard_count <= 95:
    # Adding labels puts us in danger of exceeding the URL length, only
    # do it when we have a small amount of data to plot.
    # Only 16 labels on the whole chart.
    stride_length = max(1, shard_count / 16)
    chart.bottom.labels = []
    for x in xrange(shard_count):
      if (x % stride_length == 0 or
          x == shard_count - 1):
        chart.bottom.labels.append(x)
      else:
        chart.bottom.labels.append("")
    chart.left.labels = ["0", str(max(shards_processed))]
    chart.left.min = 0

  self.chart_width = min(700, max(300, shard_count * 20))
  self.chart_url = chart.display.Url(self.chart_width, 200)
python
def set_processed_counts(self, shards_processed, shards_status):
  """Updates a chart url to display processed count for each shard.

  Args:
    shards_processed: list of integers with number of processed entities in
      each shard.
    shards_status: list of status strings for each shard, parallel to
      shards_processed.
  """
  chart = google_chart_api.BarChart()

  def filter_status(status_to_filter):
    return [count if status == status_to_filter else 0
            for count, status in zip(shards_processed, shards_status)]

  if shards_status:
    # Each index will have only one non-zero count, so stack them to color-
    # code the bars by status.
    # These status values are computed in _update_state_from_shard_states,
    # in mapreduce/handlers.py.
    chart.stacked = True
    chart.AddBars(filter_status("unknown"), color="404040")
    chart.AddBars(filter_status("success"), color="00ac42")
    chart.AddBars(filter_status("running"), color="3636a9")
    chart.AddBars(filter_status("aborted"), color="e29e24")
    chart.AddBars(filter_status("failed"), color="f6350f")
  else:
    chart.AddBars(shards_processed)

  shard_count = len(shards_processed)

  if shard_count > 95:
    # Auto-spacing does not work for large numbers of shards.
    pixels_per_shard = 700.0 / shard_count
    bar_thickness = int(pixels_per_shard * .9)
    chart.style = bar_chart.BarChartStyle(bar_thickness=bar_thickness,
                                          bar_gap=0.1,
                                          use_fractional_gap_spacing=True)

  if shards_processed and shard_count <= 95:
    # Adding labels puts us in danger of exceeding the URL length, only
    # do it when we have a small amount of data to plot.
    # Only 16 labels on the whole chart.
    stride_length = max(1, shard_count / 16)
    chart.bottom.labels = []
    for x in xrange(shard_count):
      if (x % stride_length == 0 or
          x == shard_count - 1):
        chart.bottom.labels.append(x)
      else:
        chart.bottom.labels.append("")
    chart.left.labels = ["0", str(max(shards_processed))]
    chart.left.min = 0

  self.chart_width = min(700, max(300, shard_count * 20))
  self.chart_url = chart.display.Url(self.chart_width, 200)
Updates a chart url to display processed count for each shard. Args: shards_processed: list of integers with number of processed entities in each shard. shards_status: list of status strings for each shard, parallel to shards_processed.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L637-L690
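A stand-alone illustration of the per-status series split performed by the nested filter_status helper above: every shard's count appears in exactly one series, so stacking the series reproduces the original bar heights while letting each status take its own color. The sample data is made up.

shards_processed = [120, 95, 0, 130]
shards_status = ["success", "running", "failed", "success"]

def filter_status(status_to_filter):
  # Keep a shard's count only in the series matching its status; zero elsewhere.
  return [count if status == status_to_filter else 0
          for count, status in zip(shards_processed, shards_status)]

series = dict((s, filter_status(s)) for s in ("success", "running", "failed"))
assert series["success"] == [120, 0, 0, 130]
assert [sum(col) for col in zip(*series.values())] == shards_processed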
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
MapreduceState.create_new
def create_new(mapreduce_id=None,
               gettime=datetime.datetime.now):
  """Create a new MapreduceState.

  Args:
    mapreduce_id: Mapreduce id as string.
    gettime: Used for testing.
  """
  if not mapreduce_id:
    mapreduce_id = MapreduceState.new_mapreduce_id()
  state = MapreduceState(key_name=mapreduce_id,
                         last_poll_time=gettime())
  state.set_processed_counts([], [])
  return state
python
def create_new(mapreduce_id=None,
               gettime=datetime.datetime.now):
  """Create a new MapreduceState.

  Args:
    mapreduce_id: Mapreduce id as string.
    gettime: Used for testing.
  """
  if not mapreduce_id:
    mapreduce_id = MapreduceState.new_mapreduce_id()
  state = MapreduceState(key_name=mapreduce_id,
                         last_poll_time=gettime())
  state.set_processed_counts([], [])
  return state
Create a new MapreduceState. Args: mapreduce_id: Mapreduce id as string. gettime: Used for testing.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L703-L716
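A hedged usage sketch for create_new; it requires the App Engine datastore runtime (or the testbed stubs), and the job id below is a made-up example. Note the factory only constructs the entity: the code above never calls put(), so persisting is the caller's job.

from mapreduce.model import MapreduceState

state = MapreduceState.create_new("job-0001")
state.put()   # create_new does not persist; the caller saves the entity explicitly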
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
TransientShardState.reset_for_retry
def reset_for_retry(self, output_writer):
  """Reset self for shard retry.

  Args:
    output_writer: new output writer that contains new output files.
  """
  self.input_reader = self.initial_input_reader
  self.slice_id = 0
  self.retries += 1
  self.output_writer = output_writer
  self.handler = self.mapreduce_spec.mapper.handler
python
def reset_for_retry(self, output_writer):
  """Reset self for shard retry.

  Args:
    output_writer: new output writer that contains new output files.
  """
  self.input_reader = self.initial_input_reader
  self.slice_id = 0
  self.retries += 1
  self.output_writer = output_writer
  self.handler = self.mapreduce_spec.mapper.handler
Reset self for shard retry. Args: output_writer: new output writer that contains new output files.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L776-L786
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
TransientShardState.advance_for_next_slice
def advance_for_next_slice(self, recovery_slice=False):
  """Advance relevant states for next slice.

  Args:
    recovery_slice: True if this slice is running recovery logic.
      See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery
      for more info.
  """
  if recovery_slice:
    self.slice_id += 2
    # Restore input reader to the beginning of the slice.
    self.input_reader = self.input_reader.from_json(self._input_reader_json)
  else:
    self.slice_id += 1
python
def advance_for_next_slice(self, recovery_slice=False):
  """Advance relevant states for next slice.

  Args:
    recovery_slice: True if this slice is running recovery logic.
      See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery
      for more info.
  """
  if recovery_slice:
    self.slice_id += 2
    # Restore input reader to the beginning of the slice.
    self.input_reader = self.input_reader.from_json(self._input_reader_json)
  else:
    self.slice_id += 1
Advance relevant states for next slice. Args: recovery_slice: True if this slice is running recovery logic. See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery for more info.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L788-L801
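A toy illustration of the slice-numbering rule used by advance_for_next_slice (both the transient state here and the ShardState variant further down): a normal slice advances the id by one, while a recovery pass advances it by two so the recovery work and the slice that follows each own a distinct, strictly increasing id. The helper below is hypothetical, not library code; the real methods also reset lease bookkeeping and, for TransientShardState, rewind the input reader to the slice start.

def next_slice_id(slice_id, recovery_slice=False):
  # Mirrors the increment rule in the methods above.
  return slice_id + 2 if recovery_slice else slice_id + 1

assert next_slice_id(5) == 6
assert next_slice_id(5, recovery_slice=True) == 7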
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
TransientShardState.to_dict
def to_dict(self):
  """Convert state to dictionary to save in task payload."""
  result = {"mapreduce_spec": self.mapreduce_spec.to_json_str(),
            "shard_id": self.shard_id,
            "slice_id": str(self.slice_id),
            "input_reader_state": self.input_reader.to_json_str(),
            "initial_input_reader_state":
                self.initial_input_reader.to_json_str(),
            "retries": str(self.retries)}
  if self.output_writer:
    result["output_writer_state"] = self.output_writer.to_json_str()
  serialized_handler = util.try_serialize_handler(self.handler)
  if serialized_handler:
    result["serialized_handler"] = serialized_handler
  return result
python
def to_dict(self):
  """Convert state to dictionary to save in task payload."""
  result = {"mapreduce_spec": self.mapreduce_spec.to_json_str(),
            "shard_id": self.shard_id,
            "slice_id": str(self.slice_id),
            "input_reader_state": self.input_reader.to_json_str(),
            "initial_input_reader_state":
                self.initial_input_reader.to_json_str(),
            "retries": str(self.retries)}
  if self.output_writer:
    result["output_writer_state"] = self.output_writer.to_json_str()
  serialized_handler = util.try_serialize_handler(self.handler)
  if serialized_handler:
    result["serialized_handler"] = serialized_handler
  return result
Convert state to dictionary to save in task payload.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L803-L817
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
TransientShardState.from_request
def from_request(cls, request):
  """Create new TransientShardState from webapp request."""
  mapreduce_spec = MapreduceSpec.from_json_str(request.get("mapreduce_spec"))
  mapper_spec = mapreduce_spec.mapper
  input_reader_spec_dict = json.loads(request.get("input_reader_state"),
                                      cls=json_util.JsonDecoder)
  input_reader = mapper_spec.input_reader_class().from_json(
      input_reader_spec_dict)
  initial_input_reader_spec_dict = json.loads(
      request.get("initial_input_reader_state"), cls=json_util.JsonDecoder)
  initial_input_reader = mapper_spec.input_reader_class().from_json(
      initial_input_reader_spec_dict)

  output_writer = None
  if mapper_spec.output_writer_class():
    output_writer = mapper_spec.output_writer_class().from_json(
        json.loads(request.get("output_writer_state", "{}"),
                   cls=json_util.JsonDecoder))
    assert isinstance(output_writer, mapper_spec.output_writer_class()), (
        "%s.from_json returned an instance of wrong class: %s" % (
            mapper_spec.output_writer_class(),
            output_writer.__class__))

  handler = util.try_deserialize_handler(request.get("serialized_handler"))
  if not handler:
    handler = mapreduce_spec.mapper.handler

  return cls(mapreduce_spec.params["base_path"],
             mapreduce_spec,
             str(request.get("shard_id")),
             int(request.get("slice_id")),
             input_reader,
             initial_input_reader,
             output_writer=output_writer,
             retries=int(request.get("retries")),
             handler=handler)
python
def from_request(cls, request):
  """Create new TransientShardState from webapp request."""
  mapreduce_spec = MapreduceSpec.from_json_str(request.get("mapreduce_spec"))
  mapper_spec = mapreduce_spec.mapper
  input_reader_spec_dict = json.loads(request.get("input_reader_state"),
                                      cls=json_util.JsonDecoder)
  input_reader = mapper_spec.input_reader_class().from_json(
      input_reader_spec_dict)
  initial_input_reader_spec_dict = json.loads(
      request.get("initial_input_reader_state"), cls=json_util.JsonDecoder)
  initial_input_reader = mapper_spec.input_reader_class().from_json(
      initial_input_reader_spec_dict)

  output_writer = None
  if mapper_spec.output_writer_class():
    output_writer = mapper_spec.output_writer_class().from_json(
        json.loads(request.get("output_writer_state", "{}"),
                   cls=json_util.JsonDecoder))
    assert isinstance(output_writer, mapper_spec.output_writer_class()), (
        "%s.from_json returned an instance of wrong class: %s" % (
            mapper_spec.output_writer_class(),
            output_writer.__class__))

  handler = util.try_deserialize_handler(request.get("serialized_handler"))
  if not handler:
    handler = mapreduce_spec.mapper.handler

  return cls(mapreduce_spec.params["base_path"],
             mapreduce_spec,
             str(request.get("shard_id")),
             int(request.get("slice_id")),
             input_reader,
             initial_input_reader,
             output_writer=output_writer,
             retries=int(request.get("retries")),
             handler=handler)
Create new TransientShardState from webapp request.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L820-L855
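A toy sketch of the stringify/parse pairing between to_dict and from_request: task payload parameters travel as strings, so numeric fields are converted on the way out and parsed back on the way in. The field names below match the payload keys above, but the two helpers are hypothetical simplifications, not library code.

def state_to_payload(shard_id, slice_id, retries):
  # Task payload values must be strings, so numeric fields are stringified.
  return {"shard_id": shard_id, "slice_id": str(slice_id), "retries": str(retries)}

def state_from_payload(params):
  # ...and parsed back into their real types when the worker task runs.
  return str(params["shard_id"]), int(params["slice_id"]), int(params["retries"])

assert state_from_payload(state_to_payload("job-0001-0", 7, 0)) == ("job-0001-0", 7, 0)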
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
ShardState.reset_for_retry
def reset_for_retry(self):
  """Reset self for shard retry."""
  self.retries += 1
  self.last_work_item = ""
  self.active = True
  self.result_status = None
  self.input_finished = False
  self.counters_map = CountersMap()
  self.slice_id = 0
  self.slice_start_time = None
  self.slice_request_id = None
  self.slice_retries = 0
  self.acquired_once = False
python
def reset_for_retry(self):
  """Reset self for shard retry."""
  self.retries += 1
  self.last_work_item = ""
  self.active = True
  self.result_status = None
  self.input_finished = False
  self.counters_map = CountersMap()
  self.slice_id = 0
  self.slice_start_time = None
  self.slice_request_id = None
  self.slice_retries = 0
  self.acquired_once = False
Reset self for shard retry.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L973-L985
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
ShardState.advance_for_next_slice
def advance_for_next_slice(self, recovery_slice=False):
  """Advance self for next slice.

  Args:
    recovery_slice: True if this slice is running recovery logic.
      See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery
      for more info.
  """
  self.slice_start_time = None
  self.slice_request_id = None
  self.slice_retries = 0
  self.acquired_once = False
  if recovery_slice:
    self.slice_id += 2
  else:
    self.slice_id += 1
python
def advance_for_next_slice(self, recovery_slice=False):
  """Advance self for next slice.

  Args:
    recovery_slice: True if this slice is running recovery logic.
      See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery
      for more info.
  """
  self.slice_start_time = None
  self.slice_request_id = None
  self.slice_retries = 0
  self.acquired_once = False
  if recovery_slice:
    self.slice_id += 2
  else:
    self.slice_id += 1
Advance self for next slice. Args: recovery_slice: True if this slice is running recovery logic. See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery for more info.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L987-L1002
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
ShardState.copy_from
def copy_from(self, other_state):
  """Copy data from another shard state entity to self."""
  for prop in self.properties().values():
    setattr(self, prop.name, getattr(other_state, prop.name))
python
def copy_from(self, other_state):
  """Copy data from another shard state entity to self."""
  for prop in self.properties().values():
    setattr(self, prop.name, getattr(other_state, prop.name))
Copy data from another shard state entity to self.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L1026-L1029
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
ShardState.find_all_by_mapreduce_state
def find_all_by_mapreduce_state(cls, mapreduce_state):
  """Find all shard states for given mapreduce.

  Args:
    mapreduce_state: MapreduceState instance

  Yields:
    shard states sorted by shard id.
  """
  keys = cls.calculate_keys_by_mapreduce_state(mapreduce_state)
  i = 0
  while i < len(keys):
    @db.non_transactional
    def no_tx_get(i):
      return db.get(keys[i:i+cls._MAX_STATES_IN_MEMORY])

    # We need a separate function so that we can mix the non-transactional
    # decorator with generator semantics.
    states = no_tx_get(i)
    for s in states:
      i += 1
      if s is not None:
        yield s
python
def find_all_by_mapreduce_state(cls, mapreduce_state):
  """Find all shard states for given mapreduce.

  Args:
    mapreduce_state: MapreduceState instance

  Yields:
    shard states sorted by shard id.
  """
  keys = cls.calculate_keys_by_mapreduce_state(mapreduce_state)
  i = 0
  while i < len(keys):
    @db.non_transactional
    def no_tx_get(i):
      return db.get(keys[i:i+cls._MAX_STATES_IN_MEMORY])

    # We need a separate function so that we can mix the non-transactional
    # decorator with generator semantics.
    states = no_tx_get(i)
    for s in states:
      i += 1
      if s is not None:
        yield s
Find all shard states for given mapreduce. Args: mapreduce_state: MapreduceState instance Yields: shard states sorted by shard id.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L1106-L1127
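A stand-alone sketch of the batched-fetch generator above, with a callable standing in for db.get and a plain dict for the datastore: keys are pulled in fixed-size chunks and keys whose shard state was never written are skipped. The batch size below is an assumption; the real value lives on ShardState._MAX_STATES_IN_MEMORY.

_MAX_STATES_IN_MEMORY = 10   # assumed batch size

def fetch_in_batches(keys, get_multi):
  i = 0
  while i < len(keys):
    for entity in get_multi(keys[i:i + _MAX_STATES_IN_MEMORY]):
      i += 1
      if entity is not None:   # missing shard states are silently skipped
        yield entity

store = dict(("shard-%02d" % n, "state-%02d" % n if n % 3 else None)
             for n in range(25))
found = list(fetch_in_batches(sorted(store), lambda ks: [store[k] for k in ks]))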
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
ShardState.calculate_keys_by_mapreduce_state
def calculate_keys_by_mapreduce_state(cls, mapreduce_state):
  """Calculate all shard states keys for given mapreduce.

  Args:
    mapreduce_state: MapreduceState instance

  Returns:
    A list of keys for shard states, sorted by shard id.
    The corresponding shard states may not exist.
  """
  if mapreduce_state is None:
    return []

  keys = []
  for i in range(mapreduce_state.mapreduce_spec.mapper.shard_count):
    shard_id = cls.shard_id_from_number(mapreduce_state.key().name(), i)
    keys.append(cls.get_key_by_shard_id(shard_id))
  return keys
python
def calculate_keys_by_mapreduce_state(cls, mapreduce_state):
  """Calculate all shard states keys for given mapreduce.

  Args:
    mapreduce_state: MapreduceState instance

  Returns:
    A list of keys for shard states, sorted by shard id.
    The corresponding shard states may not exist.
  """
  if mapreduce_state is None:
    return []

  keys = []
  for i in range(mapreduce_state.mapreduce_spec.mapper.shard_count):
    shard_id = cls.shard_id_from_number(mapreduce_state.key().name(), i)
    keys.append(cls.get_key_by_shard_id(shard_id))
  return keys
Calculate all shard states keys for given mapreduce. Args: mapreduce_state: MapreduceState instance Returns: A list of keys for shard states, sorted by shard id. The corresponding shard states may not exist.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L1130-L1147
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
ShardState.create_new
def create_new(cls, mapreduce_id, shard_number):
  """Create new shard state.

  Args:
    mapreduce_id: unique mapreduce id as string.
    shard_number: shard number for which to create shard state.

  Returns:
    new instance of ShardState ready to put into datastore.
  """
  shard_id = cls.shard_id_from_number(mapreduce_id, shard_number)
  state = cls(key_name=shard_id,
              mapreduce_id=mapreduce_id)
  return state
python
def create_new(cls, mapreduce_id, shard_number):
  """Create new shard state.

  Args:
    mapreduce_id: unique mapreduce id as string.
    shard_number: shard number for which to create shard state.

  Returns:
    new instance of ShardState ready to put into datastore.
  """
  shard_id = cls.shard_id_from_number(mapreduce_id, shard_number)
  state = cls(key_name=shard_id,
              mapreduce_id=mapreduce_id)
  return state
Create new shard state. Args: mapreduce_id: unique mapreduce id as string. shard_number: shard number for which to create shard state. Returns: new instance of ShardState ready to put into datastore.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L1150-L1163
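A hedged usage sketch tying the two class methods above together: shard ids are derived purely from the job id and shard number, so every shard key can be computed up front even before the entities exist. It assumes a MapreduceState entity named mapreduce_state was loaded earlier (for example via MapreduceState.get_by_job_id) and that the datastore runtime is available.

from mapreduce.model import ShardState

keys = ShardState.calculate_keys_by_mapreduce_state(mapreduce_state)  # one key per shard
first_shard = ShardState.create_new(mapreduce_state.key().name(), 0)  # shard number 0
first_shard.put()   # create_new does not persist the entity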
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
MapreduceControl.get_key_by_job_id
def get_key_by_job_id(cls, mapreduce_id):
  """Retrieves the Key for a mapreduce ID.

  Args:
    mapreduce_id: The job to fetch.

  Returns:
    Datastore Key for the command for the given job ID.
  """
  return db.Key.from_path(cls.kind(), "%s:%s" % (mapreduce_id, cls._KEY_NAME))
python
def get_key_by_job_id(cls, mapreduce_id):
  """Retrieves the Key for a mapreduce ID.

  Args:
    mapreduce_id: The job to fetch.

  Returns:
    Datastore Key for the command for the given job ID.
  """
  return db.Key.from_path(cls.kind(), "%s:%s" % (mapreduce_id, cls._KEY_NAME))
Retrieves the Key for a mapreduce ID. Args: mapreduce_id: The job to fetch. Returns: Datastore Key for the command for the given job ID.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L1188-L1197
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/model.py
MapreduceControl.abort
def abort(cls, mapreduce_id, **kwargs):
  """Causes a job to abort.

  Args:
    mapreduce_id: The job to abort. Not verified as a valid job.
  """
  cls(key_name="%s:%s" % (mapreduce_id, cls._KEY_NAME),
      command=cls.ABORT).put(**kwargs)
python
def abort(cls, mapreduce_id, **kwargs):
  """Causes a job to abort.

  Args:
    mapreduce_id: The job to abort. Not verified as a valid job.
  """
  cls(key_name="%s:%s" % (mapreduce_id, cls._KEY_NAME),
      command=cls.ABORT).put(**kwargs)
Causes a job to abort. Args: mapreduce_id: The job to abort. Not verified as a valid job.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L1200-L1207
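A hedged usage sketch for abort: writing the control entity is the whole request, and the running job picks it up on a later pass. The datastore runtime is required, and the job id below is a made-up example; as the docstring notes, the id is not validated against a live job.

from mapreduce.model import MapreduceControl

MapreduceControl.abort("1234567890")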
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/api/map_job/datastore_input_reader.py
DatastoreInputReader.validate
def validate(cls, job_config):
  """Inherit docs."""
  super(DatastoreInputReader, cls).validate(job_config)
  params = job_config.input_reader_params
  entity_kind = params[cls.ENTITY_KIND_PARAM]
  # Check for a "." in the entity kind.
  if "." in entity_kind:
    logging.warning(
        ". detected in entity kind %s specified for reader %s. "
        "Assuming entity kind contains the dot.",
        entity_kind, cls.__name__)
  # Validate the filters parameters.
  if cls.FILTERS_PARAM in params:
    filters = params[cls.FILTERS_PARAM]
    for f in filters:
      if f[1] != "=":
        raise errors.BadReaderParamsError(
            "Only equality filters are supported: %s", f)
python
def validate(cls, job_config):
  """Inherit docs."""
  super(DatastoreInputReader, cls).validate(job_config)
  params = job_config.input_reader_params
  entity_kind = params[cls.ENTITY_KIND_PARAM]
  # Check for a "." in the entity kind.
  if "." in entity_kind:
    logging.warning(
        ". detected in entity kind %s specified for reader %s. "
        "Assuming entity kind contains the dot.",
        entity_kind, cls.__name__)
  # Validate the filters parameters.
  if cls.FILTERS_PARAM in params:
    filters = params[cls.FILTERS_PARAM]
    for f in filters:
      if f[1] != "=":
        raise errors.BadReaderParamsError(
            "Only equality filters are supported: %s", f)
Inherit docs.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/datastore_input_reader.py#L32-L49
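A stand-alone sketch of the filter shape this validator accepts: (property, operator, value) triples whose operator must be equality. The sample data is made up, and a plain ValueError stands in for errors.BadReaderParamsError.

filters = [("department", "=", "engineering"), ("active", "=", True)]
for f in filters:
  if f[1] != "=":
    raise ValueError("Only equality filters are supported: %s" % (f,))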
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/base_handler.py
TaskQueueHandler.initialize
def initialize(self, request, response):
  """Initialize.

  1. call webapp init.
  2. check request is indeed from taskqueue.
  3. check the task has not been retried too many times.
  4. run handler specific processing logic.
  5. run error handling logic if processing failed.

  Args:
    request: a webapp.Request instance.
    response: a webapp.Response instance.
  """
  super(TaskQueueHandler, self).initialize(request, response)

  # Check request is from taskqueue.
  if "X-AppEngine-QueueName" not in self.request.headers:
    logging.error(self.request.headers)
    logging.error("Task queue handler received non-task queue request")
    self.response.set_status(
        403, message="Task queue handler received non-task queue request")
    return

  # Check task has not been retried too many times.
  if self.task_retry_count() + 1 > parameters.config.TASK_MAX_ATTEMPTS:
    logging.error(
        "Task %s has been attempted %s times. Dropping it permanently.",
        self.request.headers["X-AppEngine-TaskName"],
        self.task_retry_count() + 1)
    self._drop_gracefully()
    return

  try:
    self._preprocess()
    self._preprocess_success = True
  # pylint: disable=bare-except
  except:
    self._preprocess_success = False
    logging.error(
        "Preprocess task %s failed. Dropping it permanently.",
        self.request.headers["X-AppEngine-TaskName"])
    self._drop_gracefully()
python
def initialize(self, request, response):
  """Initialize.

  1. call webapp init.
  2. check request is indeed from taskqueue.
  3. check the task has not been retried too many times.
  4. run handler specific processing logic.
  5. run error handling logic if processing failed.

  Args:
    request: a webapp.Request instance.
    response: a webapp.Response instance.
  """
  super(TaskQueueHandler, self).initialize(request, response)

  # Check request is from taskqueue.
  if "X-AppEngine-QueueName" not in self.request.headers:
    logging.error(self.request.headers)
    logging.error("Task queue handler received non-task queue request")
    self.response.set_status(
        403, message="Task queue handler received non-task queue request")
    return

  # Check task has not been retried too many times.
  if self.task_retry_count() + 1 > parameters.config.TASK_MAX_ATTEMPTS:
    logging.error(
        "Task %s has been attempted %s times. Dropping it permanently.",
        self.request.headers["X-AppEngine-TaskName"],
        self.task_retry_count() + 1)
    self._drop_gracefully()
    return

  try:
    self._preprocess()
    self._preprocess_success = True
  # pylint: disable=bare-except
  except:
    self._preprocess_success = False
    logging.error(
        "Preprocess task %s failed. Dropping it permanently.",
        self.request.headers["X-AppEngine-TaskName"])
    self._drop_gracefully()
Initialize. 1. call webapp init. 2. check request is indeed from taskqueue. 3. check the task has not been retried too many times. 4. run handler specific processing logic. 5. run error handling logic if processing failed. Args: request: a webapp.Request instance. response: a webapp.Response instance.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/base_handler.py#L93-L134
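A stand-alone sketch of the two gate checks initialize() performs before any handler work: reject requests that did not come through the task queue, and permanently drop tasks that have been retried too often. The constant and the helper below are hypothetical stand-ins for parameters.config.TASK_MAX_ATTEMPTS and the handler plumbing.

TASK_MAX_ATTEMPTS = 30   # assumed value; the real limit comes from parameters.config

def should_run(headers, retry_count):
  if "X-AppEngine-QueueName" not in headers:
    return False, 403   # not from the task queue service: reject outright
  if retry_count + 1 > TASK_MAX_ATTEMPTS:
    return False, 200   # retried too often: answer success so it is dropped, not retried
  return True, 200

assert should_run({"X-AppEngine-QueueName": "default"}, 0) == (True, 200)
assert should_run({}, 0) == (False, 403)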
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/base_handler.py
TaskQueueHandler.retry_task
def retry_task(self):
  """Ask taskqueue to retry this task.

  Even though raising an exception can cause a task retry, it will
  flood logs with highly visible ERROR logs. Handlers should use this
  method to perform controlled task retries. Only raise exceptions for
  those that deserve ERROR log entries.
  """
  self.response.set_status(httplib.SERVICE_UNAVAILABLE, "Retry task")
  self.response.clear()
python
def retry_task(self):
  """Ask taskqueue to retry this task.

  Even though raising an exception can cause a task retry, it will
  flood logs with highly visible ERROR logs. Handlers should use this
  method to perform controlled task retries. Only raise exceptions for
  those that deserve ERROR log entries.
  """
  self.response.set_status(httplib.SERVICE_UNAVAILABLE, "Retry task")
  self.response.clear()
Ask taskqueue to retry this task. Even though raising an exception can cause a task retry, it will flood logs with highly visible ERROR logs. Handlers should use this method to perform controlled task retries. Only raise exceptions for those that deserve ERROR log entries.
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/base_handler.py#L163-L172
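A stand-alone sketch of the controlled-retry behaviour: answering 503 makes the task queue re-enqueue the task without the ERROR-level stack trace an uncaught exception would leave behind. The fake response class exists only for the example; 503 is httplib.SERVICE_UNAVAILABLE.

class FakeResponse(object):   # stands in for a webapp response
  def __init__(self):
    self.status = None
    self.body = "partial output"
  def set_status(self, code, message=""):
    self.status = (code, message)
  def clear(self):
    self.body = ""

def retry_task(response):
  # Same effect as the method above: 503 asks the task queue to retry later.
  response.set_status(503, "Retry task")
  response.clear()

resp = FakeResponse()
retry_task(resp)
assert resp.status == (503, "Retry task") and resp.body == ""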