Code
stringlengths
103
85.9k
Summary
listlengths
0
94
Please provide a description of the function:def decode_index_value(self, index, value): if index.endswith("_int"): return int(value) else: return bytes_to_str(value)
[ "\n Decodes a secondary index value into the correct Python type.\n :param index: the name of the index\n :type index: str\n :param value: the value of the index entry\n :type value: str\n :rtype str or int\n " ]
Please provide a description of the function:def encode_bucket_props(self, props, msg): for prop in NORMAL_PROPS: if prop in props and props[prop] is not None: if isinstance(props[prop], six.string_types): setattr(msg.props, prop, str_to_bytes(props[prop])) else: setattr(msg.props, prop, props[prop]) for prop in COMMIT_HOOK_PROPS: if prop in props: setattr(msg.props, 'has_' + prop, True) self.encode_hooklist(props[prop], getattr(msg.props, prop)) for prop in MODFUN_PROPS: if prop in props and props[prop] is not None: self.encode_modfun(props[prop], getattr(msg.props, prop)) for prop in QUORUM_PROPS: if prop in props and props[prop] not in (None, 'default'): value = self.encode_quorum(props[prop]) if value is not None: if isinstance(value, six.string_types): setattr(msg.props, prop, str_to_bytes(value)) else: setattr(msg.props, prop, value) if 'repl' in props: msg.props.repl = REPL_TO_PB[props['repl']] return msg
[ "\n Encodes a dict of bucket properties into the protobuf message.\n\n :param props: bucket properties\n :type props: dict\n :param msg: the protobuf message to fill\n :type msg: riak.pb.riak_pb2.RpbSetBucketReq\n " ]
Please provide a description of the function:def decode_bucket_props(self, msg): props = {} for prop in NORMAL_PROPS: if msg.HasField(prop): props[prop] = getattr(msg, prop) if isinstance(props[prop], bytes): props[prop] = bytes_to_str(props[prop]) for prop in COMMIT_HOOK_PROPS: if getattr(msg, 'has_' + prop): props[prop] = self.decode_hooklist(getattr(msg, prop)) for prop in MODFUN_PROPS: if msg.HasField(prop): props[prop] = self.decode_modfun(getattr(msg, prop)) for prop in QUORUM_PROPS: if msg.HasField(prop): props[prop] = self.decode_quorum(getattr(msg, prop)) if msg.HasField('repl'): props['repl'] = REPL_TO_PY[msg.repl] return props
[ "\n Decodes the protobuf bucket properties message into a dict.\n\n :param msg: the protobuf message to decode\n :type msg: riak.pb.riak_pb2.RpbBucketProps\n :rtype dict\n " ]
Please provide a description of the function:def encode_modfun(self, props, msg=None): if msg is None: msg = riak.pb.riak_pb2.RpbModFun() msg.module = str_to_bytes(props['mod']) msg.function = str_to_bytes(props['fun']) return msg
[ "\n Encodes a dict with 'mod' and 'fun' keys into a protobuf\n modfun pair. Used in bucket properties.\n\n :param props: the module/function pair\n :type props: dict\n :param msg: the protobuf message to fill\n :type msg: riak.pb.riak_pb2.RpbModFun\n :rtype riak.pb.riak_pb2.RpbModFun\n " ]
Please provide a description of the function:def encode_hooklist(self, hooklist, msg): for hook in hooklist: pbhook = msg.add() self.encode_hook(hook, pbhook)
[ "\n Encodes a list of commit hooks into their protobuf equivalent.\n Used in bucket properties.\n\n :param hooklist: a list of commit hooks\n :type hooklist: list\n :param msg: a protobuf field that is a list of commit hooks\n " ]
Please provide a description of the function:def decode_hook(self, hook): if hook.HasField('modfun'): return self.decode_modfun(hook.modfun) else: return {'name': bytes_to_str(hook.name)}
[ "\n Decodes a protobuf commit hook message into a dict. Used in\n bucket properties.\n\n :param hook: the hook to decode\n :type hook: riak.pb.riak_pb2.RpbCommitHook\n :rtype dict\n " ]
Please provide a description of the function:def encode_hook(self, hook, msg): if 'name' in hook: msg.name = str_to_bytes(hook['name']) else: self.encode_modfun(hook, msg.modfun) return msg
[ "\n Encodes a commit hook dict into the protobuf message. Used in\n bucket properties.\n\n :param hook: the hook to encode\n :type hook: dict\n :param msg: the protobuf message to fill\n :type msg: riak.pb.riak_pb2.RpbCommitHook\n :rtype riak.pb.riak_pb2.RpbCommitHook\n " ]
Please provide a description of the function:def encode_index_req(self, bucket, index, startkey, endkey=None, return_terms=None, max_results=None, continuation=None, timeout=None, term_regex=None, streaming=False): req = riak.pb.riak_kv_pb2.RpbIndexReq( bucket=str_to_bytes(bucket.name), index=str_to_bytes(index)) self._add_bucket_type(req, bucket.bucket_type) if endkey is not None: req.qtype = riak.pb.riak_kv_pb2.RpbIndexReq.range req.range_min = str_to_bytes(str(startkey)) req.range_max = str_to_bytes(str(endkey)) else: req.qtype = riak.pb.riak_kv_pb2.RpbIndexReq.eq req.key = str_to_bytes(str(startkey)) if return_terms is not None: req.return_terms = return_terms if max_results: req.max_results = max_results if continuation: req.continuation = str_to_bytes(continuation) if timeout: if timeout == 'infinity': req.timeout = 0 else: req.timeout = timeout if term_regex: req.term_regex = str_to_bytes(term_regex) req.stream = streaming mc = riak.pb.messages.MSG_CODE_INDEX_REQ rc = riak.pb.messages.MSG_CODE_INDEX_RESP return Msg(mc, req.SerializeToString(), rc)
[ "\n Encodes a secondary index request into the protobuf message.\n\n :param bucket: the bucket whose index to query\n :type bucket: string\n :param index: the index to query\n :type index: string\n :param startkey: the value or beginning of the range\n :type startkey: integer, string\n :param endkey: the end of the range\n :type endkey: integer, string\n :param return_terms: whether to return the index term with the key\n :type return_terms: bool\n :param max_results: the maximum number of results to return (page size)\n :type max_results: integer\n :param continuation: the opaque continuation returned from a\n previous paginated request\n :type continuation: string\n :param timeout: a timeout value in milliseconds, or 'infinity'\n :type timeout: int\n :param term_regex: a regular expression used to filter index terms\n :type term_regex: string\n :param streaming: encode as streaming request\n :type streaming: bool\n :rtype riak.pb.riak_kv_pb2.RpbIndexReq\n " ]
Please provide a description of the function:def decode_search_index(self, index): result = {} result['name'] = bytes_to_str(index.name) if index.HasField('schema'): result['schema'] = bytes_to_str(index.schema) if index.HasField('n_val'): result['n_val'] = index.n_val return result
[ "\n Fills an RpbYokozunaIndex message with the appropriate data.\n\n :param index: a yz index message\n :type index: riak.pb.riak_yokozuna_pb2.RpbYokozunaIndex\n :rtype dict\n " ]
Please provide a description of the function:def encode_timeseries_put(self, tsobj): req = riak.pb.riak_ts_pb2.TsPutReq() req.table = str_to_bytes(tsobj.table.name) if tsobj.columns: raise NotImplementedError("columns are not implemented yet") if tsobj.rows and isinstance(tsobj.rows, list): for row in tsobj.rows: tsr = req.rows.add() # NB: type TsRow if not isinstance(row, list): raise ValueError("TsObject row must be a list of values") for cell in row: tsc = tsr.cells.add() # NB: type TsCell self.encode_to_ts_cell(cell, tsc) else: raise RiakError("TsObject requires a list of rows") mc = riak.pb.messages.MSG_CODE_TS_PUT_REQ rc = riak.pb.messages.MSG_CODE_TS_PUT_RESP return Msg(mc, req.SerializeToString(), rc)
[ "\n Fills an TsPutReq message with the appropriate data and\n metadata from a TsObject.\n\n :param tsobj: a TsObject\n :type tsobj: TsObject\n :param req: the protobuf message to fill\n :type req: riak.pb.riak_ts_pb2.TsPutReq\n " ]
Please provide a description of the function:def decode_timeseries(self, resp, tsobj, convert_timestamp=False): if resp.columns is not None: col_names = [] col_types = [] for col in resp.columns: col_names.append(bytes_to_str(col.name)) col_type = self.decode_timeseries_col_type(col.type) col_types.append(col_type) tsobj.columns = TsColumns(col_names, col_types) tsobj.rows = [] if resp.rows is not None: for row in resp.rows: tsobj.rows.append( self.decode_timeseries_row( row, resp.columns, convert_timestamp))
[ "\n Fills an TsObject with the appropriate data and\n metadata from a TsGetResp / TsQueryResp.\n\n :param resp: the protobuf message from which to process data\n :type resp: riak.pb.riak_ts_pb2.TsQueryRsp or\n riak.pb.riak_ts_pb2.TsGetResp\n :param tsobj: a TsObject\n :type tsobj: TsObject\n :param convert_timestamp: Convert timestamps to datetime objects\n :type tsobj: boolean\n " ]
Please provide a description of the function:def decode_timeseries_row(self, tsrow, tscols=None, convert_timestamp=False): row = [] for i, cell in enumerate(tsrow.cells): col = None if tscols is not None: col = tscols[i] if cell.HasField('varchar_value'): if col and not (col.type == TsColumnType.Value('VARCHAR') or col.type == TsColumnType.Value('BLOB')): raise TypeError('expected VARCHAR or BLOB column') else: row.append(cell.varchar_value) elif cell.HasField('sint64_value'): if col and col.type != TsColumnType.Value('SINT64'): raise TypeError('expected SINT64 column') else: row.append(cell.sint64_value) elif cell.HasField('double_value'): if col and col.type != TsColumnType.Value('DOUBLE'): raise TypeError('expected DOUBLE column') else: row.append(cell.double_value) elif cell.HasField('timestamp_value'): if col and col.type != TsColumnType.Value('TIMESTAMP'): raise TypeError('expected TIMESTAMP column') else: dt = cell.timestamp_value if convert_timestamp: dt = datetime_from_unix_time_millis( cell.timestamp_value) row.append(dt) elif cell.HasField('boolean_value'): if col and col.type != TsColumnType.Value('BOOLEAN'): raise TypeError('expected BOOLEAN column') else: row.append(cell.boolean_value) else: row.append(None) return row
[ "\n Decodes a TsRow into a list\n\n :param tsrow: the protobuf TsRow to decode.\n :type tsrow: riak.pb.riak_ts_pb2.TsRow\n :param tscols: the protobuf TsColumn data to help decode.\n :type tscols: list\n :rtype list\n " ]
Please provide a description of the function:def decode_preflist(self, item): result = {'partition': item.partition, 'node': bytes_to_str(item.node), 'primary': item. primary} return result
[ "\n Decodes a preflist response\n\n :param preflist: a bucket/key preflist\n :type preflist: list of\n riak.pb.riak_kv_pb2.RpbBucketKeyPreflistItem\n :rtype dict\n " ]
Please provide a description of the function:def ping(self): msg_code = riak.pb.messages.MSG_CODE_PING_REQ codec = self._get_codec(msg_code) msg = codec.encode_ping() resp_code, _ = self._request(msg, codec) if resp_code == riak.pb.messages.MSG_CODE_PING_RESP: return True else: return False
[ "\n Ping the remote server\n " ]
Please provide a description of the function:def get_server_info(self): # NB: can't do it this way due to recursion # codec = self._get_codec(ttb_supported=False) codec = PbufCodec() msg = Msg(riak.pb.messages.MSG_CODE_GET_SERVER_INFO_REQ, None, riak.pb.messages.MSG_CODE_GET_SERVER_INFO_RESP) resp_code, resp = self._request(msg, codec) return codec.decode_get_server_info(resp)
[ "\n Get information about the server\n " ]
Please provide a description of the function:def get(self, robj, r=None, pr=None, timeout=None, basic_quorum=None, notfound_ok=None, head_only=False): msg_code = riak.pb.messages.MSG_CODE_GET_REQ codec = self._get_codec(msg_code) msg = codec.encode_get(robj, r, pr, timeout, basic_quorum, notfound_ok, head_only) resp_code, resp = self._request(msg, codec) return codec.decode_get(robj, resp)
[ "\n Serialize get request and deserialize response\n " ]
Please provide a description of the function:def ts_stream_keys(self, table, timeout=None): msg_code = riak.pb.messages.MSG_CODE_TS_LIST_KEYS_REQ codec = self._get_codec(msg_code) msg = codec.encode_timeseries_listkeysreq(table, timeout) self._send_msg(msg.msg_code, msg.data) return PbufTsKeyStream(self, codec, self._ts_convert_timestamp)
[ "\n Streams keys from a timeseries table, returning an iterator that\n yields lists of keys.\n " ]
Please provide a description of the function:def get_keys(self, bucket, timeout=None): msg_code = riak.pb.messages.MSG_CODE_LIST_KEYS_REQ codec = self._get_codec(msg_code) stream = self.stream_keys(bucket, timeout=timeout) return codec.decode_get_keys(stream)
[ "\n Lists all keys within a bucket.\n " ]
Please provide a description of the function:def stream_keys(self, bucket, timeout=None): msg_code = riak.pb.messages.MSG_CODE_LIST_KEYS_REQ codec = self._get_codec(msg_code) msg = codec.encode_stream_keys(bucket, timeout) self._send_msg(msg.msg_code, msg.data) return PbufKeyStream(self, codec)
[ "\n Streams keys from a bucket, returning an iterator that yields\n lists of keys.\n " ]
Please provide a description of the function:def get_buckets(self, bucket_type=None, timeout=None): msg_code = riak.pb.messages.MSG_CODE_LIST_BUCKETS_REQ codec = self._get_codec(msg_code) msg = codec.encode_get_buckets(bucket_type, timeout, streaming=False) resp_code, resp = self._request(msg, codec) return resp.buckets
[ "\n Serialize bucket listing request and deserialize response\n " ]
Please provide a description of the function:def stream_buckets(self, bucket_type=None, timeout=None): if not self.bucket_stream(): raise NotImplementedError('Streaming list-buckets is not ' 'supported') msg_code = riak.pb.messages.MSG_CODE_LIST_BUCKETS_REQ codec = self._get_codec(msg_code) msg = codec.encode_get_buckets(bucket_type, timeout, streaming=True) self._send_msg(msg.msg_code, msg.data) return PbufBucketStream(self, codec)
[ "\n Stream list of buckets through an iterator\n " ]
Please provide a description of the function:def get_bucket_props(self, bucket): msg_code = riak.pb.messages.MSG_CODE_GET_BUCKET_REQ codec = self._get_codec(msg_code) msg = codec.encode_get_bucket_props(bucket) resp_code, resp = self._request(msg, codec) return codec.decode_bucket_props(resp.props)
[ "\n Serialize bucket property request and deserialize response\n " ]
Please provide a description of the function:def set_bucket_props(self, bucket, props): if not self.pb_all_bucket_props(): for key in props: if key not in ('n_val', 'allow_mult'): raise NotImplementedError('Server only supports n_val and ' 'allow_mult properties over PBC') msg_code = riak.pb.messages.MSG_CODE_SET_BUCKET_REQ codec = self._get_codec(msg_code) msg = codec.encode_set_bucket_props(bucket, props) resp_code, resp = self._request(msg, codec) return True
[ "\n Serialize set bucket property request and deserialize response\n " ]
Please provide a description of the function:def clear_bucket_props(self, bucket): if not self.pb_clear_bucket_props(): return False msg_code = riak.pb.messages.MSG_CODE_RESET_BUCKET_REQ codec = self._get_codec(msg_code) msg = codec.encode_clear_bucket_props(bucket) self._request(msg, codec) return True
[ "\n Clear bucket properties, resetting them to their defaults\n " ]
Please provide a description of the function:def get_bucket_type_props(self, bucket_type): self._check_bucket_types(bucket_type) msg_code = riak.pb.messages.MSG_CODE_GET_BUCKET_TYPE_REQ codec = self._get_codec(msg_code) msg = codec.encode_get_bucket_type_props(bucket_type) resp_code, resp = self._request(msg, codec) return codec.decode_bucket_props(resp.props)
[ "\n Fetch bucket-type properties\n " ]
Please provide a description of the function:def set_bucket_type_props(self, bucket_type, props): self._check_bucket_types(bucket_type) msg_code = riak.pb.messages.MSG_CODE_SET_BUCKET_TYPE_REQ codec = self._get_codec(msg_code) msg = codec.encode_set_bucket_type_props(bucket_type, props) resp_code, resp = self._request(msg, codec) return True
[ "\n Set bucket-type properties\n " ]
Please provide a description of the function:def get_preflist(self, bucket, key): if not self.preflists(): raise NotImplementedError("fetching preflists is not supported.") msg_code = riak.pb.messages.MSG_CODE_GET_BUCKET_KEY_PREFLIST_REQ codec = self._get_codec(msg_code) msg = codec.encode_get_preflist(bucket, key) resp_code, resp = self._request(msg, codec) return [codec.decode_preflist(item) for item in resp.preflist]
[ "\n Get the preflist for a bucket/key\n\n :param bucket: Riak Bucket\n :type bucket: :class:`~riak.bucket.RiakBucket`\n :param key: Riak Key\n :type key: string\n :rtype: list of dicts\n " ]
Please provide a description of the function:def print_report(label, user, system, real): print("{:<12s} {:12f} {:12f} ( {:12f} )".format(label, user, system, real))
[ "\n Prints the report of one step of a benchmark.\n " ]
Please provide a description of the function:def next(self): if self.count == 0: raise StopIteration elif self.count > 1: print_rehearsal_header() else: if self.rehearse: gc.collect() print("-" * 59) print() print_header() self.count -= 1 return self
[ "\n Runs the next iteration of the benchmark.\n " ]
Please provide a description of the function:def add(self, arg1, arg2=None, arg3=None, bucket_type=None): from riak.riak_object import RiakObject if (arg2 is None) and (arg3 is None): if isinstance(arg1, RiakObject): return self.add_object(arg1) else: return self.add_bucket(arg1, bucket_type) else: return self.add_bucket_key_data(arg1, arg2, arg3, bucket_type)
[ "\n Add inputs to a map/reduce operation. This method takes three\n different forms, depending on the provided inputs. You can\n specify either a RiakObject, a string bucket name, or a bucket,\n key, and additional arg.\n\n :param arg1: the object or bucket to add\n :type arg1: RiakObject, string\n :param arg2: a key or list of keys to add (if a bucket is\n given in arg1)\n :type arg2: string, list, None\n :param arg3: key data for this input (must be convertible to JSON)\n :type arg3: string, list, dict, None\n :param bucket_type: Optional name of a bucket type\n :type bucket_type: string, None\n :rtype: :class:`RiakMapReduce`\n " ]
Please provide a description of the function:def add_object(self, obj): return self.add_bucket_key_data(obj._bucket._name, obj._key, None)
[ "\n Adds a RiakObject to the inputs.\n\n :param obj: the object to add\n :type obj: RiakObject\n :rtype: :class:`RiakMapReduce`\n " ]
Please provide a description of the function:def add_bucket_key_data(self, bucket, key, data, bucket_type=None): if self._input_mode == 'bucket': raise ValueError('Already added a bucket, can\'t add an object.') elif self._input_mode == 'query': raise ValueError('Already added a query, can\'t add an object.') else: if isinstance(key, Iterable) and \ not isinstance(key, string_types): if bucket_type is not None: for k in key: self._inputs.append([bucket, k, data, bucket_type]) else: for k in key: self._inputs.append([bucket, k, data]) else: if bucket_type is not None: self._inputs.append([bucket, key, data, bucket_type]) else: self._inputs.append([bucket, key, data]) return self
[ "\n Adds a bucket/key/keydata triple to the inputs.\n\n :param bucket: the bucket\n :type bucket: string\n :param key: the key or list of keys\n :type key: string\n :param data: the key-specific data\n :type data: string, list, dict, None\n :param bucket_type: Optional name of a bucket type\n :type bucket_type: string, None\n :rtype: :class:`RiakMapReduce`\n " ]
Please provide a description of the function:def add_bucket(self, bucket, bucket_type=None): if not riak.disable_list_exceptions: raise riak.ListError() self._input_mode = 'bucket' if isinstance(bucket, riak.RiakBucket): if bucket.bucket_type.is_default(): self._inputs = {'bucket': bucket.name} else: self._inputs = {'bucket': [bucket.bucket_type.name, bucket.name]} elif bucket_type is not None and bucket_type != "default": self._inputs = {'bucket': [bucket_type, bucket]} else: self._inputs = {'bucket': bucket} return self
[ "\n Adds all keys in a bucket to the inputs.\n\n :param bucket: the bucket\n :type bucket: string\n :param bucket_type: Optional name of a bucket type\n :type bucket_type: string, None\n :rtype: :class:`RiakMapReduce`\n " ]
Please provide a description of the function:def add_key_filters(self, key_filters): if self._input_mode == 'query': raise ValueError('Key filters are not supported in a query.') self._key_filters.extend(key_filters) return self
[ "\n Adds key filters to the inputs.\n\n :param key_filters: a list of filters\n :type key_filters: list\n :rtype: :class:`RiakMapReduce`\n " ]
Please provide a description of the function:def add_key_filter(self, *args): if self._input_mode == 'query': raise ValueError('Key filters are not supported in a query.') self._key_filters.append(args) return self
[ "\n Add a single key filter to the inputs.\n\n :param args: a filter\n :type args: list\n :rtype: :class:`RiakMapReduce`\n " ]
Please provide a description of the function:def search(self, index, query): self._input_mode = 'query' self._inputs = {'bucket': index, 'index': index, 'query': query} return self
[ "\n Begin a map/reduce operation using a Search. This command will\n return an error unless executed against a Riak Search cluster.\n\n :param index: The Solr index used in the search\n :type index: string\n :param query: The search query\n :type query: string\n :rtype: :class:`RiakMapReduce`\n " ]
Please provide a description of the function:def index(self, bucket, index, startkey, endkey=None, bucket_type=None): self._input_mode = 'query' if endkey is None: self._inputs = {'bucket': bucket, 'index': index, 'key': startkey} else: self._inputs = {'bucket': bucket, 'index': index, 'start': startkey, 'end': endkey} if bucket_type is not None: self._inputs['bucket'] = [bucket_type, bucket] return self
[ "\n Begin a map/reduce operation using a Secondary Index\n query.\n\n :param bucket: The bucket over which to perform the query\n :type bucket: string\n :param index: The index to use for query\n :type index: string\n :param startkey: The start key of index range, or the\n value which all entries must equal\n :type startkey: string, integer\n :param endkey: The end key of index range (if doing a range query)\n :type endkey: string, integer, None\n :param bucket_type: Optional name of a bucket type\n :type bucket_type: string, None\n :rtype: :class:`RiakMapReduce`\n " ]
Please provide a description of the function:def link(self, bucket='_', tag='_', keep=False): self._phases.append(RiakLinkPhase(bucket, tag, keep)) return self
[ "\n Add a link phase to the map/reduce operation.\n\n :param bucket: Bucket name (default '_', which means all\n buckets)\n :type bucket: string\n :param tag: Tag (default '_', which means any tag)\n :type tag: string\n :param keep: Flag whether to keep results from this stage in\n the map/reduce. (default False, unless this is the last step\n in the phase)\n :type keep: boolean\n :rtype: :class:`RiakMapReduce`\n " ]
Please provide a description of the function:def map(self, function, options=None): if options is None: options = dict() if isinstance(function, list): language = 'erlang' else: language = 'javascript' mr = RiakMapReducePhase('map', function, options.get('language', language), options.get('keep', False), options.get('arg', None)) self._phases.append(mr) return self
[ "\n Add a map phase to the map/reduce operation.\n\n :param function: Either a named Javascript function (ie:\n 'Riak.mapValues'), or an anonymous javascript function (ie:\n 'function(...) ... ' or an array ['erlang_module',\n 'function'].\n :type function: string, list\n :param options: phase options, containing 'language', 'keep'\n flag, and/or 'arg'.\n :type options: dict\n :rtype: :class:`RiakMapReduce`\n " ]
Please provide a description of the function:def run(self, timeout=None): query, link_results_flag = self._normalize_query() try: result = self._client.mapred(self._inputs, query, timeout) except riak.RiakError as e: if 'worker_startup_failed' in e.value: for phase in self._phases: if phase._language == 'erlang': if type(phase._function) is str: raise riak.RiakError( 'May have tried erlang strfun ' 'when not allowed\n' 'original error: ' + e.value) raise e # If the last phase is NOT a link phase, then return the result. if not (link_results_flag or isinstance(self._phases[-1], RiakLinkPhase)): return result # If there are no results, then return an empty list. if result is None: return [] # Otherwise, if the last phase IS a link phase, then convert the # results to link tuples. a = [] for r in result: if (len(r) == 2): link = (r[0], r[1], None) elif (len(r) == 3): link = (r[0], r[1], r[2]) a.append(link) return a
[ "\n Run the map/reduce operation synchronously. Returns a list of\n results, or a list of links if the last phase is a link phase.\n Shortcut for :meth:`riak.client.RiakClient.mapred`.\n\n :param timeout: Timeout in milliseconds\n :type timeout: integer, None\n :rtype: list\n " ]
Please provide a description of the function:def stream(self, timeout=None): query, lrf = self._normalize_query() return self._client.stream_mapred(self._inputs, query, timeout)
[ "\n Streams the MapReduce query (returns an iterator). Shortcut\n for :meth:`riak.client.RiakClient.stream_mapred`.\n\n :param timeout: Timeout in milliseconds\n :type timeout: integer\n :rtype: iterator that yields (phase_num, data) tuples\n " ]
Please provide a description of the function:def reduce_sort(self, js_cmp=None, options=None): if options is None: options = dict() if js_cmp: options['arg'] = js_cmp return self.reduce("Riak.reduceSort", options=options)
[ "\n Adds the Javascript built-in ``Riak.reduceSort`` to the query\n as a reduce phase.\n\n :param js_cmp: A Javascript comparator function as specified by\n Array.sort()\n :type js_cmp: string\n :param options: phase options, containing 'language', 'keep'\n flag, and/or 'arg'.\n :type options: dict\n " ]
Please provide a description of the function:def reduce_limit(self, limit, options=None): if options is None: options = dict() options['arg'] = limit # reduceLimit is broken in riak_kv code = return self.reduce(code, options=options)
[ "\n Adds the Javascript built-in ``Riak.reduceLimit`` to the query\n as a reduce phase.\n\n :param limit: the maximum number of results to return\n :type limit: integer\n :param options: phase options, containing 'language', 'keep'\n flag, and/or 'arg'.\n :type options: dict\n ", "function(value, arg) {\n return value.slice(0, arg);\n }" ]
Please provide a description of the function:def reduce_slice(self, start, end, options=None): if options is None: options = dict() options['arg'] = [start, end] return self.reduce("Riak.reduceSlice", options=options)
[ "\n Adds the Javascript built-in ``Riak.reduceSlice`` to the\n query as a reduce phase.\n\n :param start: the beginning of the slice\n :type start: integer\n :param end: the end of the slice\n :type end: integer\n :param options: phase options, containing 'language', 'keep'\n flag, and/or 'arg'.\n :type options: dict\n " ]
Please provide a description of the function:def to_array(self): stepdef = {'keep': self._keep, 'language': self._language, 'arg': self._arg} if self._language == 'javascript': if isinstance(self._function, list): stepdef['bucket'] = self._function[0] stepdef['key'] = self._function[1] elif isinstance(self._function, string_types): if ("{" in self._function): stepdef['source'] = self._function else: stepdef['name'] = self._function elif (self._language == 'erlang' and isinstance(self._function, list)): stepdef['module'] = self._function[0] stepdef['function'] = self._function[1] elif (self._language == 'erlang' and isinstance(self._function, string_types)): stepdef['source'] = self._function return {self._type: stepdef}
[ "\n Convert the RiakMapReducePhase to a format that can be output\n into JSON. Used internally.\n\n :rtype: dict\n " ]
Please provide a description of the function:def to_array(self): stepdef = {'bucket': self._bucket, 'tag': self._tag, 'keep': self._keep} return {'link': stepdef}
[ "\n Convert the RiakLinkPhase to a format that can be output into\n JSON. Used internally.\n " ]
Please provide a description of the function:def add(self, arg1, arg2=None, arg3=None, bucket_type=None): mr = RiakMapReduce(self) return mr.add(arg1, arg2, arg3, bucket_type)
[ "\n Start assembling a Map/Reduce operation. A shortcut for\n :func:`RiakMapReduce.add`.\n\n :param arg1: the object or bucket to add\n :type arg1: RiakObject, string\n :param arg2: a key or list of keys to add (if a bucket is\n given in arg1)\n :type arg2: string, list, None\n :param arg3: key data for this input (must be convertible to JSON)\n :type arg3: string, list, dict, None\n :param bucket_type: Optional name of a bucket type\n :type bucket_type: string, None\n :rtype: :class:`RiakMapReduce`\n " ]
Please provide a description of the function:def index(self, bucket, index, startkey, endkey=None, bucket_type=None): mr = RiakMapReduce(self) return mr.index(bucket, index, startkey, endkey, bucket_type)
[ "\n Start assembling a Map/Reduce operation based on secondary\n index query results.\n\n :param bucket: The bucket over which to perform the query\n :type bucket: string\n :param index: The index to use for query\n :type index: string\n :param startkey: The start key of index range, or the\n value which all entries must equal\n :type startkey: string, integer\n :param endkey: The end key of index range (if doing a range query)\n :type endkey: string, integer, None\n :param bucket_type: Optional name of a bucket type\n :type bucket_type: string, None\n :rtype: :class:`RiakMapReduce`\n " ]
Please provide a description of the function:def add_index(self, field, value): if field[-4:] not in ("_bin", "_int"): raise RiakError("Riak 2i fields must end with either '_bin'" " or '_int'.") self.indexes.add((field, value)) return self._robject
[ "\n add_index(field, value)\n\n Tag this object with the specified field/value pair for\n indexing.\n\n :param field: The index field.\n :type field: string\n :param value: The index value.\n :type value: string or integer\n :rtype: :class:`RiakObject <riak.riak_object.RiakObject>`\n " ]
Please provide a description of the function:def remove_index(self, field=None, value=None): if not field and not value: self.indexes.clear() elif field and not value: for index in [x for x in self.indexes if x[0] == field]: self.indexes.remove(index) elif field and value: self.indexes.remove((field, value)) else: raise RiakError("Cannot pass value without a field" " name while removing index") return self._robject
[ "\n remove_index(field=None, value=None)\n\n Remove the specified field/value pair as an index on this\n object.\n\n :param field: The index field.\n :type field: string\n :param value: The index value.\n :type value: string or integer\n :rtype: :class:`RiakObject <riak.riak_object.RiakObject>`\n " ]
Please provide a description of the function:def set_index(self, field, value): to_rem = set((x for x in self.indexes if x[0] == field)) self.indexes.difference_update(to_rem) return self.add_index(field, value)
[ "\n set_index(field, value)\n\n Works like :meth:`add_index`, but ensures that there is only\n one index on given field. If other found, then removes it\n first.\n\n :param field: The index field.\n :type field: string\n :param value: The index value.\n :type value: string or integer\n :rtype: :class:`RiakObject <riak.riak_object.RiakObject>`\n " ]
Please provide a description of the function:def add_link(self, obj, tag=None): if isinstance(obj, tuple): newlink = obj else: newlink = (obj.bucket.name, obj.key, tag) self.links.append(newlink) return self._robject
[ "\n add_link(obj, tag=None)\n\n Add a link to a RiakObject.\n\n :param obj: Either a RiakObject or 3 item link tuple consisting\n of (bucket, key, tag).\n :type obj: mixed\n :param tag: Optional link tag. Defaults to bucket name. It is ignored\n if ``obj`` is a 3 item link tuple.\n :type tag: string\n :rtype: :class:`RiakObject <riak.riak_object.RiakObject>`\n " ]
Please provide a description of the function:def last_written_resolver(riak_object): riak_object.siblings = [max(riak_object.siblings, key=lambda x: x.last_modified), ]
[ "\n A conflict-resolution function that resolves by selecting the most\n recently-modified sibling by timestamp.\n\n :param riak_object: an object-in-conflict that will be resolved\n :type riak_object: :class:`RiakObject <riak.riak_object.RiakObject>`\n " ]
def verify_cb(conn, cert, errnum, depth, ok):
    """
    The default OpenSSL certificate verification callback.

    Returns ``ok`` unchanged when upstream verification succeeded;
    raises :class:`SecurityError` otherwise.
    """
    if ok:
        return ok
    raise SecurityError("Could not verify CA certificate {0}"
                        .format(cert.get_subject()))
[ "\n The default OpenSSL certificate verification callback.\n " ]
def next_page(self, timeout=None, stream=None):
    """
    Fetches the next page using the same parameters as the
    original query.

    Note that if streaming was used before, it will be used again
    unless overridden.

    :param stream: whether to enable streaming. `True` enables,
        `False` disables, `None` uses previous value.
    :type stream: boolean
    :param timeout: a timeout value in milliseconds, or 'infinity'
    :type timeout: int
    """
    if not self.continuation:
        raise ValueError("Cannot get next index page, no continuation")
    if stream is not None:
        self.stream = stream
    params = dict(bucket=self.bucket,
                  index=self.index,
                  startkey=self.startkey,
                  endkey=self.endkey,
                  return_terms=self.return_terms,
                  max_results=self.max_results,
                  continuation=self.continuation,
                  timeout=timeout,
                  term_regex=self.term_regex)
    query = (self.client.stream_index if self.stream
             else self.client.get_index)
    return query(**params)
[ "\n Fetches the next page using the same parameters as the\n original query.\n\n Note that if streaming was used before, it will be used again\n unless overridden.\n\n :param stream: whether to enable streaming. `True` enables,\n `False` disables, `None` uses previous value.\n :type stream: boolean\n :param timeout: a timeout value in milliseconds, or 'infinity'\n :type timeout: int\n " ]
Please provide a description of the function:def _inject_term(self, result): if self._should_inject_term(result): if type(result) is list: return [(self.startkey, r) for r in result] else: return (self.startkey, result) else: return result
[ "\n Upgrades a result (streamed or not) to include the index term\n when an equality query is used with return_terms.\n " ]
def is_retryable(err):
    """
    Determines if the given exception is something that is
    network/socket-related and should thus cause the TCP connection to
    close and the operation retried on another node.

    :rtype: boolean
    """
    if isinstance(err, ConnectionClosed):
        # NB: only retryable if we're not mid-streaming
        return not err.mid_stream
    if isinstance(err, socket.error):
        return err.args[0] in CONN_CLOSED_ERRORS
    return False
[ "\n Determines if the given exception is something that is\n network/socket-related and should thus cause the TCP connection to\n close and the operation retried on another node.\n\n :rtype: boolean\n " ]
Please provide a description of the function:def _validate_timeout(timeout, infinity_ok=False): if timeout is None: return if timeout == 'infinity': if infinity_ok: return else: raise ValueError( 'timeout must be a positive integer ' '("infinity" is not valid)') if isinstance(timeout, six.integer_types) and timeout > 0: return raise ValueError('timeout must be a positive integer')
[ "\n Raises an exception if the given timeout is an invalid value.\n " ]
def get_buckets(self, transport, bucket_type=None, timeout=None):
    """
    get_buckets(bucket_type=None, timeout=None)

    Get the list of buckets as :class:`RiakBucket
    <riak.bucket.RiakBucket>` instances.

    .. warning:: Do not use this in production, as it requires
       traversing through all keys stored in a cluster.

    :param bucket_type: the optional containing bucket type
    :type bucket_type: :class:`~riak.bucket.BucketType`
    :param timeout: a timeout value in milliseconds
    :type timeout: int
    :rtype: list of :class:`RiakBucket <riak.bucket.RiakBucket>`
        instances
    """
    # Listing buckets is expensive; require explicit opt-in.
    if not riak.disable_list_exceptions:
        raise ListError()
    _validate_timeout(timeout)
    if bucket_type:
        make_bucket = self._bucket_type_bucket_builder
    else:
        make_bucket = self._default_type_bucket_builder
    names = transport.get_buckets(bucket_type=bucket_type,
                                  timeout=timeout)
    return [make_bucket(bytes_to_str(name), bucket_type)
            for name in names]
[ "\n get_buckets(bucket_type=None, timeout=None)\n\n Get the list of buckets as :class:`RiakBucket\n <riak.bucket.RiakBucket>` instances.\n\n .. warning:: Do not use this in production, as it requires\n traversing through all keys stored in a cluster.\n\n .. note:: This request is automatically retried :attr:`retries`\n times if it fails due to network error.\n\n :param bucket_type: the optional containing bucket type\n :type bucket_type: :class:`~riak.bucket.BucketType`\n :param timeout: a timeout value in milliseconds\n :type timeout: int\n :rtype: list of :class:`RiakBucket <riak.bucket.RiakBucket>`\n instances\n " ]
def stream_buckets(self, bucket_type=None, timeout=None):
    """
    Streams the list of buckets. This is a generator method that
    should be iterated over, yielding non-empty lists of
    :class:`RiakBucket <riak.bucket.RiakBucket>` instances.

    .. warning:: Do not use this in production, as it requires
       traversing through all keys stored in a cluster.

    The caller should explicitly close the returned iterator, either
    using :func:`contextlib.closing` or calling ``close()``
    explicitly, so the associated connection is returned to the pool.

    :param bucket_type: the optional containing bucket type
    :type bucket_type: :class:`~riak.bucket.BucketType`
    :param timeout: a timeout value in milliseconds
    :type timeout: int
    """
    if not riak.disable_list_exceptions:
        raise ListError()
    _validate_timeout(timeout)
    if bucket_type:
        make_bucket = self._bucket_type_bucket_builder
    else:
        make_bucket = self._default_type_bucket_builder

    def make_op(transport):
        return transport.stream_buckets(bucket_type=bucket_type,
                                        timeout=timeout)

    for raw_names in self._stream_with_retry(make_op):
        buckets = [make_bucket(bytes_to_str(name), bucket_type)
                   for name in raw_names]
        if buckets:
            yield buckets
[ "\n Streams the list of buckets. This is a generator method that\n should be iterated over.\n\n .. warning:: Do not use this in production, as it requires\n traversing through all keys stored in a cluster.\n\n The caller should explicitly close the returned iterator,\n either using :func:`contextlib.closing` or calling ``close()``\n explicitly. Consuming the entire iterator will also close the\n stream. If it does not, the associated connection might not be\n returned to the pool. Example::\n\n from contextlib import closing\n\n # Using contextlib.closing\n with closing(client.stream_buckets()) as buckets:\n for bucket_list in buckets:\n do_something(bucket_list)\n\n # Explicit close()\n stream = client.stream_buckets()\n for bucket_list in stream:\n do_something(bucket_list)\n stream.close()\n\n :param bucket_type: the optional containing bucket type\n :type bucket_type: :class:`~riak.bucket.BucketType`\n :param timeout: a timeout value in milliseconds\n :type timeout: int\n :rtype: iterator that yields lists of :class:`RiakBucket\n <riak.bucket.RiakBucket>` instances\n\n " ]
def get_index(self, transport, bucket, index, startkey, endkey=None,
              return_terms=None, max_results=None, continuation=None,
              timeout=None, term_regex=None):
    """
    get_index(bucket, index, startkey, endkey=None, return_terms=None,
              max_results=None, continuation=None, timeout=None,
              term_regex=None)

    Queries a secondary index, returning matching keys wrapped in an
    :class:`~riak.client.index_page.IndexPage`.

    :param bucket: the bucket whose index will be queried
    :param index: the index to query
    :param startkey: the sole key to query, or beginning of the range
    :param endkey: the end of the query range (optional if equality)
    :param return_terms: whether to include the secondary index value
    :param max_results: the maximum number of results (page size)
    :param continuation: opaque continuation from a previous page
    :param timeout: a timeout value in milliseconds, or 'infinity'
    :param term_regex: a regular expression used to filter index terms
    :rtype: :class:`~riak.client.index_page.IndexPage`
    """
    _validate_timeout(timeout, infinity_ok=True)
    page = IndexPage(self, bucket, index, startkey, endkey,
                     return_terms, max_results, term_regex)
    # The transport returns a (results, continuation) pair.
    page.results, page.continuation = transport.get_index(
        bucket, index, startkey, endkey,
        return_terms=return_terms, max_results=max_results,
        continuation=continuation, timeout=timeout,
        term_regex=term_regex)
    return page
[ "\n get_index(bucket, index, startkey, endkey=None, return_terms=None,\\\n max_results=None, continuation=None, timeout=None,\\\n term_regex=None)\n\n Queries a secondary index, returning matching keys.\n\n .. note:: This request is automatically retried :attr:`retries`\n times if it fails due to network error.\n\n :param bucket: the bucket whose index will be queried\n :type bucket: RiakBucket\n :param index: the index to query\n :type index: string\n :param startkey: the sole key to query, or beginning of the query range\n :type startkey: string, integer\n :param endkey: the end of the query range (optional if equality)\n :type endkey: string, integer\n :param return_terms: whether to include the secondary index value\n :type return_terms: boolean\n :param max_results: the maximum number of results to return (page size)\n :type max_results: integer\n :param continuation: the opaque continuation returned from a\n previous paginated request\n :type continuation: string\n :param timeout: a timeout value in milliseconds, or 'infinity'\n :type timeout: int\n :param term_regex: a regular expression used to filter index terms\n :type term_regex: string\n :rtype: :class:`~riak.client.index_page.IndexPage`\n " ]
def paginate_index(self, bucket, index, startkey, endkey=None,
                   max_results=1000, return_terms=None,
                   continuation=None, timeout=None, term_regex=None):
    """
    Iterates over a paginated index query. This is equivalent to
    calling :meth:`get_index` and then successively calling
    :meth:`~riak.client.index_page.IndexPage.next_page` until all
    results are exhausted.

    Because limiting the result set is necessary to invoke pagination,
    ``max_results`` defaults to ``1000``.

    :rtype: generator over instances of
        :class:`~riak.client.index_page.IndexPage`
    """
    page = self.get_index(bucket, index, startkey, endkey=endkey,
                          max_results=max_results,
                          return_terms=return_terms,
                          continuation=continuation, timeout=timeout,
                          term_regex=term_regex)
    while True:
        yield page
        if not page.has_next_page():
            return
        page = page.next_page()
[ "\n Iterates over a paginated index query. This is equivalent to calling\n :meth:`get_index` and then successively calling\n :meth:`~riak.client.index_page.IndexPage.next_page` until all\n results are exhausted.\n\n Because limiting the result set is necessary to invoke pagination,\n the ``max_results`` option has a default of ``1000``.\n\n :param bucket: the bucket whose index will be queried\n :type bucket: RiakBucket\n :param index: the index to query\n :type index: string\n :param startkey: the sole key to query, or beginning of the query range\n :type startkey: string, integer\n :param endkey: the end of the query range (optional if equality)\n :type endkey: string, integer\n :param return_terms: whether to include the secondary index value\n :type return_terms: boolean\n :param max_results: the maximum number of results to return (page\n size), defaults to 1000\n :type max_results: integer\n :param continuation: the opaque continuation returned from a\n previous paginated request\n :type continuation: string\n :param timeout: a timeout value in milliseconds, or 'infinity'\n :type timeout: int\n :param term_regex: a regular expression used to filter index terms\n :type term_regex: string\n :rtype: generator over instances of\n :class:`~riak.client.index_page.IndexPage`\n\n " ]
def stream_index(self, bucket, index, startkey, endkey=None,
                 return_terms=None, max_results=None, continuation=None,
                 timeout=None, term_regex=None):
    """
    Queries a secondary index, streaming matching keys through an
    iterator. The caller should explicitly close the returned
    :class:`~riak.client.index_page.IndexPage` (e.g. via
    :func:`contextlib.closing`) so the pooled connection is released.

    :param bucket: the bucket whose index will be queried
    :param index: the index to query
    :param startkey: the sole key to query, or beginning of the range
    :param endkey: the end of the query range (optional if equality)
    :param return_terms: whether to include the secondary index value
    :param max_results: the maximum number of results (page size)
    :param continuation: opaque continuation from a previous page
    :param timeout: a timeout value in milliseconds, or 'infinity'
    :param term_regex: a regular expression used to filter index terms
    :rtype: :class:`~riak.client.index_page.IndexPage`
    """
    # TODO FUTURE: implement "retry on connection closed"
    # as in stream_mapred
    _validate_timeout(timeout, infinity_ok=True)
    page = IndexPage(self, bucket, index, startkey, endkey,
                     return_terms, max_results, term_regex)
    page.stream = True
    resource = self._acquire()
    page.results = resource.object.stream_index(
        bucket, index, startkey, endkey, return_terms=return_terms,
        max_results=max_results, continuation=continuation,
        timeout=timeout, term_regex=term_regex)
    # Attach the pooled resource so closing the stream releases it.
    page.results.attach(resource)
    return page
[ "\n Queries a secondary index, streaming matching keys through an\n iterator.\n\n The caller should explicitly close the returned iterator,\n either using :func:`contextlib.closing` or calling ``close()``\n explicitly. Consuming the entire iterator will also close the\n stream. If it does not, the associated connection might not be\n returned to the pool. Example::\n\n from contextlib import closing\n\n # Using contextlib.closing\n with closing(client.stream_index(mybucket, 'name_bin',\n 'Smith')) as index:\n for key in index:\n do_something(key)\n\n # Explicit close()\n stream = client.stream_index(mybucket, 'name_bin', 'Smith')\n for key in stream:\n do_something(key)\n stream.close()\n\n :param bucket: the bucket whose index will be queried\n :type bucket: RiakBucket\n :param index: the index to query\n :type index: string\n :param startkey: the sole key to query, or beginning of the query range\n :type startkey: string, integer\n :param endkey: the end of the query range (optional if equality)\n :type endkey: string, integer\n :param return_terms: whether to include the secondary index value\n :type return_terms: boolean\n :param max_results: the maximum number of results to return (page size)\n :type max_results: integer\n :param continuation: the opaque continuation returned from a\n previous paginated request\n :type continuation: string\n :param timeout: a timeout value in milliseconds, or 'infinity'\n :type timeout: int\n :param term_regex: a regular expression used to filter index terms\n :type term_regex: string\n :rtype: :class:`~riak.client.index_page.IndexPage`\n\n " ]
def paginate_stream_index(self, bucket, index, startkey, endkey=None,
                          max_results=1000, return_terms=None,
                          continuation=None, timeout=None,
                          term_regex=None):
    """
    Iterates over a streaming paginated index query. This is
    equivalent to calling :meth:`stream_index` and then successively
    calling :meth:`~riak.client.index_page.IndexPage.next_page` until
    all results are exhausted.

    Because limiting the result set is necessary to invoke pagination,
    ``max_results`` defaults to ``1000``.  The caller should
    explicitly close each yielded page.

    :rtype: generator over instances of
        :class:`~riak.client.index_page.IndexPage`
    """
    # TODO FUTURE: implement "retry on connection closed"
    # as in stream_mapred
    page = self.stream_index(bucket, index, startkey, endkey=endkey,
                             max_results=max_results,
                             return_terms=return_terms,
                             continuation=continuation,
                             timeout=timeout, term_regex=term_regex)
    while True:
        yield page
        if not page.has_next_page():
            return
        page = page.next_page()
[ "\n Iterates over a streaming paginated index query. This is equivalent to\n calling :meth:`stream_index` and then successively calling\n :meth:`~riak.client.index_page.IndexPage.next_page` until all\n results are exhausted.\n\n Because limiting the result set is necessary to invoke\n pagination, the ``max_results`` option has a default of ``1000``.\n\n The caller should explicitly close each yielded page, either using\n :func:`contextlib.closing` or calling ``close()`` explicitly. Consuming\n the entire page will also close the stream. If it does not, the\n associated connection might not be returned to the pool. Example::\n\n from contextlib import closing\n\n # Using contextlib.closing\n for page in client.paginate_stream_index(mybucket, 'name_bin',\n 'Smith'):\n with closing(page):\n for key in page:\n do_something(key)\n\n # Explicit close()\n for page in client.paginate_stream_index(mybucket, 'name_bin',\n 'Smith'):\n for key in page:\n do_something(key)\n page.close()\n\n :param bucket: the bucket whose index will be queried\n :type bucket: RiakBucket\n :param index: the index to query\n :type index: string\n :param startkey: the sole key to query, or beginning of the query range\n :type startkey: string, integer\n :param endkey: the end of the query range (optional if equality)\n :type endkey: string, integer\n :param return_terms: whether to include the secondary index value\n :type return_terms: boolean\n :param max_results: the maximum number of results to return (page\n size), defaults to 1000\n :type max_results: integer\n :param continuation: the opaque continuation returned from a\n previous paginated request\n :type continuation: string\n :param timeout: a timeout value in milliseconds, or 'infinity'\n :type timeout: int\n :param term_regex: a regular expression used to filter index terms\n :type term_regex: string\n :rtype: generator over instances of\n :class:`~riak.client.index_page.IndexPage`\n\n " ]
def set_bucket_props(self, transport, bucket, props):
    """
    set_bucket_props(bucket, props)

    Sets bucket properties for the given bucket.

    :param bucket: the bucket whose properties will be set
    :type bucket: RiakBucket
    :param props: the properties to set
    :type props: dict
    """
    # Validate client-side before making the request.
    _validate_bucket_props(props)
    return transport.set_bucket_props(bucket, props)
[ "\n set_bucket_props(bucket, props)\n\n Sets bucket properties for the given bucket.\n\n .. note:: This request is automatically retried :attr:`retries`\n times if it fails due to network error.\n\n :param bucket: the bucket whose properties will be set\n :type bucket: RiakBucket\n :param props: the properties to set\n :type props: dict\n " ]
def set_bucket_type_props(self, transport, bucket_type, props):
    """
    set_bucket_type_props(bucket_type, props)

    Sets properties for the given bucket-type.

    :param bucket_type: the bucket-type whose properties will be set
    :type bucket_type: BucketType
    :param props: the properties to set
    :type props: dict
    """
    # Validate client-side before making the request.
    _validate_bucket_props(props)
    return transport.set_bucket_type_props(bucket_type, props)
[ "\n set_bucket_type_props(bucket_type, props)\n\n Sets properties for the given bucket-type.\n\n .. note:: This request is automatically retried :attr:`retries`\n times if it fails due to network error.\n\n :param bucket_type: the bucket-type whose properties will be set\n :type bucket_type: BucketType\n :param props: the properties to set\n :type props: dict\n " ]
def get_keys(self, transport, bucket, timeout=None):
    """
    get_keys(bucket, timeout=None)

    Lists all keys in a bucket.

    .. warning:: Do not use this in production, as it requires
       traversing through all keys stored in a cluster.

    :param bucket: the bucket whose keys are fetched
    :type bucket: RiakBucket
    :param timeout: a timeout value in milliseconds
    :type timeout: int
    :rtype: list
    """
    # Key listing is expensive; require explicit opt-in.
    if not riak.disable_list_exceptions:
        raise ListError()
    _validate_timeout(timeout)
    return transport.get_keys(bucket, timeout=timeout)
[ "\n get_keys(bucket, timeout=None)\n\n Lists all keys in a bucket.\n\n .. warning:: Do not use this in production, as it requires\n traversing through all keys stored in a cluster.\n\n .. note:: This request is automatically retried :attr:`retries`\n times if it fails due to network error.\n\n :param bucket: the bucket whose keys are fetched\n :type bucket: RiakBucket\n :param timeout: a timeout value in milliseconds\n :type timeout: int\n :rtype: list\n " ]
def stream_keys(self, bucket, timeout=None):
    """
    Lists all keys in a bucket via a stream. This is a generator
    method which should be iterated over, yielding non-empty lists of
    keys.  The caller should explicitly close the returned iterator
    so the associated connection is returned to the pool.

    .. warning:: Do not use this in production, as it requires
       traversing through all keys stored in a cluster.

    :param bucket: the bucket whose keys will be listed
    :type bucket: RiakBucket
    :param timeout: a timeout value in milliseconds
    :type timeout: int
    :rtype: iterator
    """
    if not riak.disable_list_exceptions:
        raise ListError()
    _validate_timeout(timeout)

    def make_op(transport):
        return transport.stream_keys(bucket, timeout=timeout)

    for keylist in self._stream_with_retry(make_op):
        if not keylist:
            continue
        if six.PY2:
            yield keylist
        else:
            # Keys arrive as bytes on Python 3; decode to str.
            yield [bytes_to_str(key) for key in keylist]
[ "\n Lists all keys in a bucket via a stream. This is a generator\n method which should be iterated over.\n\n .. warning:: Do not use this in production, as it requires\n traversing through all keys stored in a cluster.\n\n The caller should explicitly close the returned iterator,\n either using :func:`contextlib.closing` or calling ``close()``\n explicitly. Consuming the entire iterator will also close the\n stream. If it does not, the associated connection might\n not be returned to the pool. Example::\n\n from contextlib import closing\n\n # Using contextlib.closing\n with closing(client.stream_keys(mybucket)) as keys:\n for key_list in keys:\n do_something(key_list)\n\n # Explicit close()\n stream = client.stream_keys(mybucket)\n for key_list in stream:\n do_something(key_list)\n stream.close()\n\n :param bucket: the bucket whose properties will be set\n :type bucket: RiakBucket\n :param timeout: a timeout value in milliseconds\n :type timeout: int\n :rtype: iterator\n " ]
def put(self, transport, robj, w=None, dw=None, pw=None,
        return_body=None, if_none_match=None, timeout=None):
    """
    put(robj, w=None, dw=None, pw=None, return_body=None,
        if_none_match=None, timeout=None)

    Stores an object in the Riak cluster.

    :param robj: the object to store
    :type robj: RiakObject
    :param w: the write quorum
    :type w: integer, string, None
    :param dw: the durable write quorum
    :type dw: integer, string, None
    :param pw: the primary write quorum
    :type pw: integer, string, None
    :param return_body: whether to return the resulting object
        after the write
    :type return_body: boolean
    :param if_none_match: whether to fail the write if the object
        exists
    :type if_none_match: boolean
    :param timeout: a timeout value in milliseconds
    :type timeout: int
    """
    _validate_timeout(timeout)
    return transport.put(robj, w=w, dw=dw, pw=pw,
                         return_body=return_body,
                         if_none_match=if_none_match,
                         timeout=timeout)
[ "\n put(robj, w=None, dw=None, pw=None, return_body=None,\\\n if_none_match=None, timeout=None)\n\n Stores an object in the Riak cluster.\n\n .. note:: This request is automatically retried :attr:`retries`\n times if it fails due to network error.\n\n :param robj: the object to store\n :type robj: RiakObject\n :param w: the write quorum\n :type w: integer, string, None\n :param dw: the durable write quorum\n :type dw: integer, string, None\n :param pw: the primary write quorum\n :type pw: integer, string, None\n :param return_body: whether to return the resulting object\n after the write\n :type return_body: boolean\n :param if_none_match: whether to fail the write if the object\n exists\n :type if_none_match: boolean\n :param timeout: a timeout value in milliseconds\n :type timeout: int\n " ]
def ts_describe(self, transport, table):
    """
    ts_describe(table)

    Retrieve a time series table description from the Riak cluster.

    :param table: The timeseries table.
    :type table: string or :class:`Table <riak.table.Table>`
    :rtype: :class:`TsObject <riak.ts_object.TsObject>`
    """
    # Accept either a table name or a Table instance.
    if isinstance(table, six.string_types):
        table = Table(self, table)
    return transport.ts_describe(table)
[ "\n ts_describe(table)\n\n Retrieve a time series table description from the Riak cluster.\n\n .. note:: This request is automatically retried :attr:`retries`\n times if it fails due to network error.\n\n :param table: The timeseries table.\n :type table: string or :class:`Table <riak.table.Table>`\n :rtype: :class:`TsObject <riak.ts_object.TsObject>`\n " ]
def ts_get(self, transport, table, key):
    """
    ts_get(table, key)

    Retrieve a timeseries value by key.

    :param table: The timeseries table.
    :type table: string or :class:`Table <riak.table.Table>`
    :param key: The timeseries value's key.
    :type key: list
    :rtype: :class:`TsObject <riak.ts_object.TsObject>`
    """
    # Accept either a table name or a Table instance.
    if isinstance(table, six.string_types):
        table = Table(self, table)
    return transport.ts_get(table, key)
[ "\n ts_get(table, key)\n\n Retrieve timeseries value by key\n\n .. note:: This request is automatically retried :attr:`retries`\n times if it fails due to network error.\n\n :param table: The timeseries table.\n :type table: string or :class:`Table <riak.table.Table>`\n :param key: The timeseries value's key.\n :type key: list\n :rtype: :class:`TsObject <riak.ts_object.TsObject>`\n " ]
def ts_query(self, transport, table, query, interpolations=None):
    """
    ts_query(table, query, interpolations=None)

    Queries time series data in the Riak cluster.

    :param table: The timeseries table.
    :type table: string or :class:`Table <riak.table.Table>`
    :param query: The timeseries query.
    :type query: string
    :rtype: :class:`TsObject <riak.ts_object.TsObject>`
    """
    # Accept either a table name or a Table instance.
    if isinstance(table, six.string_types):
        table = Table(self, table)
    return transport.ts_query(table, query, interpolations)
[ "\n ts_query(table, query, interpolations=None)\n\n Queries time series data in the Riak cluster.\n\n .. note:: This request is automatically retried :attr:`retries`\n times if it fails due to network error.\n\n :param table: The timeseries table.\n :type table: string or :class:`Table <riak.table.Table>`\n :param query: The timeseries query.\n :type query: string\n :rtype: :class:`TsObject <riak.ts_object.TsObject>`\n " ]
def ts_stream_keys(self, table, timeout=None):
    """
    Lists all keys in a time series table via a stream. This is a
    generator method which should be iterated over, yielding
    non-empty lists of keys.  The caller should explicitly close the
    returned iterator so the associated connection is returned to the
    pool.

    :param table: the table from which to stream keys
    :type table: string or :class:`Table <riak.table.Table>`
    :param timeout: a timeout value in milliseconds
    :type timeout: int
    :rtype: iterator
    """
    if not riak.disable_list_exceptions:
        raise ListError()
    # Accept either a table name or a Table instance.
    if isinstance(table, six.string_types):
        table = Table(self, table)
    _validate_timeout(timeout)
    resource = self._acquire()
    stream = resource.object.ts_stream_keys(table, timeout)
    # Attach the pooled resource so closing the stream releases it.
    stream.attach(resource)
    try:
        for keylist in stream:
            if keylist:
                yield keylist
    finally:
        stream.close()
[ "\n Lists all keys in a time series table via a stream. This is a\n generator method which should be iterated over.\n\n The caller should explicitly close the returned iterator,\n either using :func:`contextlib.closing` or calling ``close()``\n explicitly. Consuming the entire iterator will also close the\n stream. If it does not, the associated connection might\n not be returned to the pool. Example::\n\n from contextlib import closing\n\n # Using contextlib.closing\n with closing(client.ts_stream_keys(mytable)) as keys:\n for key_list in keys:\n do_something(key_list)\n\n # Explicit close()\n stream = client.ts_stream_keys(mytable)\n for key_list in stream:\n do_something(key_list)\n stream.close()\n\n :param table: the table from which to stream keys\n :type table: string or :class:`Table <riak.table.Table>`\n :param timeout: a timeout value in milliseconds\n :type timeout: int\n :rtype: iterator\n " ]
def get(self, transport, robj, r=None, pr=None, timeout=None,
        basic_quorum=None, notfound_ok=None, head_only=False):
    """
    get(robj, r=None, pr=None, timeout=None, basic_quorum=None,
        notfound_ok=None, head_only=False)

    Fetches the contents of a Riak object.

    :param robj: the object to fetch
    :type robj: RiakObject
    :param r: the read quorum
    :type r: integer, string, None
    :param pr: the primary read quorum
    :type pr: integer, string, None
    :param timeout: a timeout value in milliseconds
    :type timeout: int
    :param basic_quorum: whether to use the "basic quorum" policy
        for not-founds
    :type basic_quorum: bool
    :param notfound_ok: whether to treat not-found responses as
        successful
    :type notfound_ok: bool
    :param head_only: whether to fetch without value, so only
        metadata (only available on PB transport)
    :type head_only: bool
    """
    _validate_timeout(timeout)
    # Fail fast on non-string keys rather than at the transport layer.
    if not isinstance(robj.key, six.string_types):
        raise TypeError(
            'key must be a string, instead got {0}'.format(repr(robj.key)))
    return transport.get(robj, r=r, pr=pr, timeout=timeout,
                         basic_quorum=basic_quorum,
                         notfound_ok=notfound_ok,
                         head_only=head_only)
[ "\n get(robj, r=None, pr=None, timeout=None)\n\n Fetches the contents of a Riak object.\n\n .. note:: This request is automatically retried :attr:`retries`\n times if it fails due to network error.\n\n :param robj: the object to fetch\n :type robj: RiakObject\n :param r: the read quorum\n :type r: integer, string, None\n :param pr: the primary read quorum\n :type pr: integer, string, None\n :param timeout: a timeout value in milliseconds\n :type timeout: int\n :param basic_quorum: whether to use the \"basic quorum\" policy\n for not-founds\n :type basic_quorum: bool\n :param notfound_ok: whether to treat not-found responses as successful\n :type notfound_ok: bool\n :param head_only: whether to fetch without value, so only metadata\n (only available on PB transport)\n :type head_only: bool\n " ]
def delete(self, transport, robj, rw=None, r=None, w=None, dw=None,
           pr=None, pw=None, timeout=None):
    """
    delete(robj, rw=None, r=None, w=None, dw=None, pr=None, pw=None,
           timeout=None)

    Deletes an object from Riak.

    :param robj: the object to delete
    :type robj: RiakObject
    :param rw: the read/write (delete) quorum
    :type rw: integer, string, None
    :param r: the read quorum
    :type r: integer, string, None
    :param pr: the primary read quorum
    :type pr: integer, string, None
    :param w: the write quorum
    :type w: integer, string, None
    :param dw: the durable write quorum
    :type dw: integer, string, None
    :param pw: the primary write quorum
    :type pw: integer, string, None
    :param timeout: a timeout value in milliseconds
    :type timeout: int
    """
    _validate_timeout(timeout)
    return transport.delete(robj, rw=rw, r=r, w=w, dw=dw, pr=pr,
                            pw=pw, timeout=timeout)
[ "\n delete(robj, rw=None, r=None, w=None, dw=None, pr=None, pw=None,\\\n timeout=None)\n\n Deletes an object from Riak.\n\n .. note:: This request is automatically retried :attr:`retries`\n times if it fails due to network error.\n\n :param robj: the object to delete\n :type robj: RiakObject\n :param rw: the read/write (delete) quorum\n :type rw: integer, string, None\n :param r: the read quorum\n :type r: integer, string, None\n :param pr: the primary read quorum\n :type pr: integer, string, None\n :param w: the write quorum\n :type w: integer, string, None\n :param dw: the durable write quorum\n :type dw: integer, string, None\n :param pw: the primary write quorum\n :type pw: integer, string, None\n :param timeout: a timeout value in milliseconds\n :type timeout: int\n " ]
def mapred(self, transport, inputs, query, timeout):
    """
    mapred(inputs, query, timeout)

    Executes a MapReduce query.

    :param inputs: the input list/structure
    :type inputs: list, dict
    :param query: the list of query phases
    :type query: list
    :param timeout: the query timeout
    :type timeout: integer, None
    :rtype: mixed
    """
    _validate_timeout(timeout)
    return transport.mapred(inputs, query, timeout)
[ "\n mapred(inputs, query, timeout)\n\n Executes a MapReduce query.\n\n .. note:: This request is automatically retried :attr:`retries`\n times if it fails due to network error.\n\n :param inputs: the input list/structure\n :type inputs: list, dict\n :param query: the list of query phases\n :type query: list\n :param timeout: the query timeout\n :type timeout: integer, None\n :rtype: mixed\n " ]
def stream_mapred(self, inputs, query, timeout):
    """
    Streams a MapReduce query as (phase, data) pairs. This is a
    generator method which should be iterated over.  The caller
    should explicitly close the returned iterator so the associated
    connection is returned to the pool.

    :param inputs: the input list/structure
    :type inputs: list, dict
    :param query: the list of query phases
    :type query: list
    :param timeout: the query timeout
    :type timeout: integer, None
    :rtype: iterator
    """
    _validate_timeout(timeout)

    def make_op(transport):
        return transport.stream_mapred(inputs, query, timeout)

    # _stream_with_retry re-issues the operation on another node if
    # the connection drops before streaming begins.
    for phase, data in self._stream_with_retry(make_op):
        yield phase, data
[ "\n Streams a MapReduce query as (phase, data) pairs. This is a\n generator method which should be iterated over.\n\n The caller should explicitly close the returned iterator,\n either using :func:`contextlib.closing` or calling ``close()``\n explicitly. Consuming the entire iterator will also close the\n stream. If it does not, the associated connection might\n not be returned to the pool. Example::\n\n from contextlib import closing\n\n # Using contextlib.closing\n with closing(mymapred.stream()) as results:\n for phase, result in results:\n do_something(phase, result)\n\n # Explicit close()\n stream = mymapred.stream()\n for phase, result in stream:\n do_something(phase, result)\n stream.close()\n\n :param inputs: the input list/structure\n :type inputs: list, dict\n :param query: the list of query phases\n :type query: list\n :param timeout: the query timeout\n :type timeout: integer, None\n :rtype: iterator\n " ]
def create_search_index(self, transport, index, schema=None,
                        n_val=None, timeout=None):
    """
    create_search_index(index, schema=None, n_val=None, timeout=None)

    Create a search index of the given name, and optionally set a
    schema. If no schema is set, the default will be used.

    :param index: the name of the index to create
    :type index: string
    :param schema: the schema that this index will follow
    :type schema: string, None
    :param n_val: this index's N value
    :type n_val: integer, None
    :param timeout: optional timeout (in ms)
    :type timeout: integer, None
    """
    return transport.create_search_index(index, schema, n_val, timeout)
[ "\n create_search_index(index, schema=None, n_val=None)\n\n Create a search index of the given name, and optionally set\n a schema. If no schema is set, the default will be used.\n\n :param index: the name of the index to create\n :type index: string\n :param schema: the schema that this index will follow\n :type schema: string, None\n :param n_val: this indexes N value\n :type n_val: integer, None\n :param timeout: optional timeout (in ms)\n :type timeout: integer, None\n " ]
def fulltext_search(self, transport, index, query, **params):
    """
    fulltext_search(index, query, **params)

    Performs a full-text search query.

    :param index: the bucket/index to search over
    :type index: string
    :param query: the search query
    :type query: string
    :param params: additional query flags
    :type params: dict
    :rtype: dict
    """
    return transport.search(index, query, **params)
[ "\n fulltext_search(index, query, **params)\n\n Performs a full-text search query.\n\n .. note:: This request is automatically retried :attr:`retries`\n times if it fails due to network error.\n\n :param index: the bucket/index to search over\n :type index: string\n :param query: the search query\n :type query: string\n :param params: additional query flags\n :type params: dict\n :rtype: dict\n " ]
def fulltext_delete(self, transport, index, docs=None, queries=None):
    """
    fulltext_delete(index, docs=None, queries=None)

    Removes documents from the full-text index.

    .. deprecated:: 2.1.0 (Riak 2.0)
       Manual index maintenance is not supported for Riak Search 2.0.

    :param index: the bucket/index from which to delete
    :type index: string
    :param docs: a list of documents (with ids)
    :type docs: list
    :param queries: a list of queries to match and delete
    :type queries: list
    """
    transport.fulltext_delete(index, docs, queries)
[ "\n fulltext_delete(index, docs=None, queries=None)\n\n .. deprecated:: 2.1.0 (Riak 2.0)\n Manual index maintenance is not supported for\n :ref:`Riak Search 2.0 <yz-label>`.\n\n Removes documents from the full-text index.\n\n .. note:: This request is automatically retried\n :attr:`retries` times if it fails due to network error.\n Only HTTP will be used for this request.\n\n :param index: the bucket/index from which to delete\n :type index: string\n :param docs: a list of documents (with ids)\n :type docs: list\n :param queries: a list of queries to match and delete\n :type queries: list\n " ]
def multiget(self, pairs, **params):
    """
    Fetches many keys in parallel via threads.

    :param pairs: list of bucket_type/bucket/key tuple triples
    :type pairs: list
    :param params: additional request flags, e.g. r, pr
    :type params: dict
    :rtype: list of :class:`RiakObjects <riak.riak_object.RiakObject>`,
        :class:`Datatypes <riak.datatypes.Datatype>`, or tuples of
        bucket_type, bucket, key, and the exception raised on fetch
    """
    # Reuse the client-level pool when one was configured.
    if self._multiget_pool:
        params['pool'] = self._multiget_pool
    return riak.client.multi.multiget(self, pairs, **params)
[ "Fetches many keys in parallel via threads.\n\n :param pairs: list of bucket_type/bucket/key tuple triples\n :type pairs: list\n :param params: additional request flags, e.g. r, pr\n :type params: dict\n :rtype: list of :class:`RiakObjects <riak.riak_object.RiakObject>`,\n :class:`Datatypes <riak.datatypes.Datatype>`, or tuples of\n bucket_type, bucket, key, and the exception raised on fetch\n " ]
def multiput(self, objs, **params):
    """
    Stores objects in parallel via threads.

    :param objs: the objects to store
    :type objs: list of :class:`RiakObject <riak.riak_object.RiakObject>`
    :param params: additional request flags, e.g. w, dw, pw
    :type params: dict
    :rtype: list of boolean or
        :class:`RiakObjects <riak.riak_object.RiakObject>`
    """
    # Reuse the client-level pool when one was configured.
    if self._multiput_pool:
        params['pool'] = self._multiput_pool
    return riak.client.multi.multiput(self, objs, **params)
[ "\n Stores objects in parallel via threads.\n\n :param objs: the objects to store\n :type objs: list of `RiakObject <riak.riak_object.RiakObject>`\n :param params: additional request flags, e.g. w, dw, pw\n :type params: dict\n :rtype: list of boolean or\n :class:`RiakObjects <riak.riak_object.RiakObject>`,\n " ]
def get_counter(self, transport, bucket, key, r=None, pr=None,
                basic_quorum=None, notfound_ok=None):
    """
    get_counter(bucket, key, r=None, pr=None, basic_quorum=None,
                notfound_ok=None)

    Gets the value of a Riak 1.4-style counter.

    .. deprecated:: 2.1.0 (Riak 2.0) in favor of the
       :class:`~riak.datatypes.Counter` datatype.

    :param bucket: the bucket of the counter
    :type bucket: RiakBucket
    :param key: the key of the counter
    :type key: string
    :param r: the read quorum
    :type r: integer, string, None
    :param pr: the primary read quorum
    :type pr: integer, string, None
    :rtype: integer
    """
    # NOTE(review): basic_quorum and notfound_ok are accepted but not
    # forwarded to the transport call below — confirm whether this is
    # intentional or a dropped-parameter bug.
    return transport.get_counter(bucket, key, r=r, pr=pr)
[ "get_counter(bucket, key, r=None, pr=None, basic_quorum=None,\\\n notfound_ok=None)\n\n Gets the value of a counter.\n\n .. deprecated:: 2.1.0 (Riak 2.0) Riak 1.4-style counters are\n deprecated in favor of the :class:`~riak.datatypes.Counter`\n datatype.\n\n .. note:: This request is automatically retried :attr:`retries`\n times if it fails due to network error.\n\n :param bucket: the bucket of the counter\n :type bucket: RiakBucket\n :param key: the key of the counter\n :type key: string\n :param r: the read quorum\n :type r: integer, string, None\n :param pr: the primary read quorum\n :type pr: integer, string, None\n :param basic_quorum: whether to use the \"basic quorum\" policy\n for not-founds\n :type basic_quorum: bool\n :param notfound_ok: whether to treat not-found responses as successful\n :type notfound_ok: bool\n :rtype: integer\n\n " ]
def update_counter(self, bucket, key, value, w=None, dw=None, pw=None,
                   returnvalue=False):
    """
    update_counter(bucket, key, value, w=None, dw=None, pw=None,
                   returnvalue=False)

    Updates a Riak 1.4-style counter by the given value. This
    operation is not idempotent and so should not be retried
    automatically.

    .. deprecated:: 2.1.0 (Riak 2.0) in favor of the
       :class:`~riak.datatypes.Counter` datatype.

    :param bucket: the bucket of the counter
    :type bucket: RiakBucket
    :param key: the key of the counter
    :type key: string
    :param value: the amount to increment or decrement
    :type value: integer
    :param w: the write quorum
    :type w: integer, string, None
    :param dw: the durable write quorum
    :type dw: integer, string, None
    :param pw: the primary write quorum
    :type pw: integer, string, None
    :param returnvalue: whether to return the updated counter value
    :type returnvalue: bool
    """
    if not isinstance(value, six.integer_types):
        raise TypeError("Counter update amount must be an integer")
    if value == 0:
        raise ValueError("Cannot increment counter by 0")
    with self._transport() as transport:
        return transport.update_counter(bucket, key, value,
                                        w=w, dw=dw, pw=pw,
                                        returnvalue=returnvalue)
[ "\n update_counter(bucket, key, value, w=None, dw=None, pw=None,\\\n returnvalue=False)\n\n .. deprecated:: 2.1.0 (Riak 2.0) Riak 1.4-style counters are\n deprecated in favor of the :class:`~riak.datatypes.Counter`\n datatype.\n\n Updates a counter by the given value. This operation is not\n idempotent and so should not be retried automatically.\n\n :param bucket: the bucket of the counter\n :type bucket: RiakBucket\n :param key: the key of the counter\n :type key: string\n :param value: the amount to increment or decrement\n :type value: integer\n :param w: the write quorum\n :type w: integer, string, None\n :param dw: the durable write quorum\n :type dw: integer, string, None\n :param pw: the primary write quorum\n :type pw: integer, string, None\n :param returnvalue: whether to return the updated value of the counter\n :type returnvalue: bool\n " ]
def fetch_datatype(self, bucket, key, r=None, pr=None,
                   basic_quorum=None, notfound_ok=None, timeout=None,
                   include_context=None):
    """
    Fetches the value of a Riak Datatype.

    :param bucket: the bucket of the datatype, which must belong to a
        :class:`~riak.bucket.BucketType`
    :type bucket: :class:`~riak.bucket.RiakBucket`
    :param key: the key of the datatype
    :type key: string
    :param r: the read quorum
    :type r: integer, string, None
    :param pr: the primary read quorum
    :type pr: integer, string, None
    :param basic_quorum: whether to use the "basic quorum" policy
        for not-founds
    :type basic_quorum: bool, None
    :param notfound_ok: whether to treat not-found responses as
        successful
    :type notfound_ok: bool, None
    :param timeout: a timeout value in milliseconds
    :type timeout: int, None
    :param include_context: whether to return the opaque context as
        well as the value, which is useful for removal operations on
        sets and maps
    :type include_context: bool, None
    :rtype: :class:`~riak.datatypes.Datatype`
    """
    dtype, value, context = self._fetch_datatype(
        bucket, key, r=r, pr=pr, basic_quorum=basic_quorum,
        notfound_ok=notfound_ok, timeout=timeout,
        include_context=include_context)

    # Reify the raw (type, value, context) triple into the matching
    # Datatype subclass.
    return TYPES[dtype](bucket=bucket, key=key, value=value,
                        context=context)
[ "\n Fetches the value of a Riak Datatype.\n\n .. note:: This request is automatically retried :attr:`retries`\n times if it fails due to network error.\n\n :param bucket: the bucket of the datatype, which must belong to a\n :class:`~riak.bucket.BucketType`\n :type bucket: :class:`~riak.bucket.RiakBucket`\n :param key: the key of the datatype\n :type key: string\n :param r: the read quorum\n :type r: integer, string, None\n :param pr: the primary read quorum\n :type pr: integer, string, None\n :param basic_quorum: whether to use the \"basic quorum\" policy\n for not-founds\n :type basic_quorum: bool, None\n :param notfound_ok: whether to treat not-found responses as successful\n :type notfound_ok: bool, None\n :param timeout: a timeout value in milliseconds\n :type timeout: int, None\n :param include_context: whether to return the opaque context\n as well as the value, which is useful for removal operations\n on sets and maps\n :type include_context: bool, None\n :rtype: :class:`~riak.datatypes.Datatype`\n " ]
def update_datatype(self, datatype, w=None, dw=None, pw=None,
                    return_body=None, timeout=None, include_context=None):
    """
    Sends an update to a Riak Datatype to the server. This operation
    is not idempotent and so will not be retried automatically.

    :param datatype: the datatype with pending updates
    :type datatype: :class:`~riak.datatypes.Datatype`
    :param w: the write quorum
    :type w: integer, string, None
    :param dw: the durable write quorum
    :type dw: integer, string, None
    :param pw: the primary write quorum
    :type pw: integer, string, None
    :param return_body: whether to return the updated body
    :type return_body: bool, None
    :param timeout: a timeout value in milliseconds
    :type timeout: int
    :param include_context: whether to return the opaque context as
        well as the value
    :type include_context: bool
    :rtype: tuple of datatype, opaque value and opaque context
    """
    _validate_timeout(timeout)
    with self._transport() as transport:
        result = transport.update_datatype(datatype, w=w, dw=dw, pw=pw,
                                           return_body=return_body,
                                           timeout=timeout,
                                           include_context=include_context)
    return result
[ "\n Sends an update to a Riak Datatype to the server. This operation is not\n idempotent and so will not be retried automatically.\n\n :param datatype: the datatype with pending updates\n :type datatype: :class:`~riak.datatypes.Datatype`\n :param w: the write quorum\n :type w: integer, string, None\n :param dw: the durable write quorum\n :type dw: integer, string, None\n :param pw: the primary write quorum\n :type pw: integer, string, None\n :param timeout: a timeout value in milliseconds\n :type timeout: int\n :param include_context: whether to return the opaque context\n as well as the value, which is useful for removal operations\n on sets and maps\n :type include_context: bool\n :rtype: tuple of datatype, opaque value and opaque context\n\n " ]
def _fetch_datatype(self, transport, bucket, key, r=None, pr=None,
                    basic_quorum=None, notfound_ok=None, timeout=None,
                    include_context=None):
    """
    Fetches the raw value of a Riak Datatype over the given transport.
    Used internally to update already-reified Datatype objects; use the
    public ``fetch_datatype`` to obtain a reified type.

    :param transport: the transport to perform the fetch on
    :param bucket: the bucket of the datatype
    :type bucket: RiakBucket
    :param key: the key of the datatype
    :type key: string
    :param r: the read quorum
    :type r: integer, string, None
    :param pr: the primary read quorum
    :type pr: integer, string, None
    :param basic_quorum: whether to use the "basic quorum" policy
        for not-founds
    :type basic_quorum: bool
    :param notfound_ok: whether to treat not-found responses as successful
    :type notfound_ok: bool
    :param timeout: a timeout value in milliseconds
    :type timeout: int
    :param include_context: whether to return the opaque context as
        well as the value
    :type include_context: bool
    :rtype: tuple of type, value and context
    """
    _validate_timeout(timeout)
    fetch_opts = dict(r=r, pr=pr,
                      basic_quorum=basic_quorum,
                      notfound_ok=notfound_ok,
                      timeout=timeout,
                      include_context=include_context)
    return transport.fetch_datatype(bucket, key, **fetch_opts)
[ "\n _fetch_datatype(bucket, key, r=None, pr=None, basic_quorum=None,\n notfound_ok=None, timeout=None, include_context=None)\n\n\n Fetches the value of a Riak Datatype as raw data. This is used\n internally to update already reified Datatype objects. Use the\n public version to fetch a reified type.\n\n .. note:: This request is automatically retried :attr:`retries`\n times if it fails due to network error.\n\n :param bucket: the bucket of the datatype, which must belong to a\n :class:`~riak.BucketType`\n :type bucket: RiakBucket\n :param key: the key of the datatype\n :type key: string, None\n :param r: the read quorum\n :type r: integer, string, None\n :param pr: the primary read quorum\n :type pr: integer, string, None\n :param basic_quorum: whether to use the \"basic quorum\" policy\n for not-founds\n :type basic_quorum: bool\n :param notfound_ok: whether to treat not-found responses as successful\n :type notfound_ok: bool\n :param timeout: a timeout value in milliseconds\n :type timeout: int\n :param include_context: whether to return the opaque context\n as well as the value, which is useful for removal operations\n on sets and maps\n :type include_context: bool\n :rtype: tuple of type, value and context\n " ]
def _non_connect_send_recv(self, msg_code, data=None):
    """
    Sends a message and reads the reply without attempting to
    initiate a connection first, preventing an infinite loop during
    connection setup.

    :param msg_code: the protocol-buffers message code to send
    :param data: optional encoded payload for the message
    :rtype: tuple of message code and payload
    """
    self._non_connect_send_msg(msg_code, data)
    reply = self._recv_msg()
    return reply
[ "\n Similar to self._send_recv, but doesn't try to initiate a connection,\n thus preventing an infinite loop.\n " ]
def _non_connect_send_msg(self, msg_code, data):
    """
    Sends an encoded message over the raw socket without attempting
    to initiate a connection first, preventing an infinite loop
    during connection setup.

    :param msg_code: the protocol-buffers message code to send
    :param data: encoded payload for the message
    :raises ConnectionClosed: when the peer has closed the connection
        (broken pipe)
    """
    packet = self._encode_msg(msg_code, data)
    try:
        self._socket.sendall(packet)
    except (IOError, socket.error) as err:
        # Any error other than a broken pipe propagates unchanged.
        if err.errno != errno.EPIPE:
            raise
        # EPIPE means the peer closed on us; surface that distinctly.
        raise ConnectionClosed(err)
[ "\n Similar to self._send, but doesn't try to initiate a connection,\n thus preventing an infinite loop.\n " ]
def _init_security(self):
    """
    Initialize a secure connection to the server: STARTTLS exchange,
    then the SSL handshake, then authorization.

    :raises SecurityError: if STARTTLS or authorization fails
    """
    started = self._starttls()
    if not started:
        raise SecurityError("Could not start TLS connection")
    # _ssl_handshake() raises on failure rather than returning False.
    self._ssl_handshake()
    authorized = self._auth()
    if not authorized:
        raise SecurityError("Could not authorize connection")
[ "\n Initialize a secure connection to the server.\n " ]
def _starttls(self):
    """
    Exchange a STARTTLS message with Riak to initiate secure
    communications.

    :rtype: bool -- True if Riak responds with a STARTTLS response,
        False otherwise
    """
    resp_code, _ = self._non_connect_send_recv(
        riak.pb.messages.MSG_CODE_START_TLS)
    return resp_code == riak.pb.messages.MSG_CODE_START_TLS
[ "\n Exchange a STARTTLS message with Riak to initiate secure communications\n return True is Riak responds with a STARTTLS response, False otherwise\n " ]
def _auth(self):
    """
    Perform an authorization request against Riak.

    Note: Riak will sleep for a short period of time upon a failed
    auth request/response to prevent denial of service attacks.

    :rtype: bool -- True upon success, False otherwise
    """
    creds = self._client._credentials
    codec = PbufCodec()
    # An unset password is sent as the empty string.
    request = codec.encode_auth(creds.username, creds.password or '')
    resp_code, _ = self._non_connect_send_recv_msg(request)
    return resp_code == riak.pb.messages.MSG_CODE_AUTH_RESP
[ "\n Perform an authorization request against Riak\n returns True upon success, False otherwise\n Note: Riak will sleep for a short period of time upon a failed\n auth request/response to prevent denial of service attacks\n " ]
def _recv_msg(self, mid_stream=False):
    """
    Receive one protocol-buffers message from the socket and split it
    into its message code and payload.

    :param mid_stream: are we receiving in a streaming operation?
    :type mid_stream: boolean
    :rtype: tuple of (msg_code, data bytes)
    :raises BadResource: on socket timeout or resource failure
    """
    try:
        msgbuf = self._recv_pkt()
    except BadResource as e:
        # Tag the error with stream position so callers can decide
        # whether the connection can be reused.
        e.mid_stream = mid_stream
        raise
    except socket.timeout as e:
        # A timeout can leave the socket in an inconsistent state because
        # it might still receive the data later and mix up with a
        # subsequent request.
        # https://github.com/basho/riak-python-client/issues/425
        raise BadResource(e, mid_stream)
    # First byte of the packet is the message code; the rest is payload.
    mv = memoryview(msgbuf)
    mcb = mv[0:1]
    if self.bytes_required:
        # Some interpreters cannot unpack directly from a memoryview.
        mcb = mcb.tobytes()
    try:
        msg_code, = struct.unpack("B", mcb)
    except struct.error:
        # NB: Python 2.7.3 requires this
        # http://bugs.python.org/issue10212
        msg_code, = struct.unpack("B", mv[0:1].tobytes())
        # Remember the workaround so future calls skip the failed path.
        self.bytes_required = True
    data = mv[1:].tobytes()
    return (msg_code, data)
[ "\n :param mid_stream: are we receiving in a streaming operation?\n :type mid_stream: boolean\n " ]
def close(self):
    """
    Closes the underlying socket of the PB connection, shutting it
    down first when using the stdlib SSL implementation.
    """
    if self._socket:
        if USE_STDLIB_SSL:
            # NB: Python 2.7.8 and earlier does not have a compatible
            # shutdown() method due to the SSL lib
            try:
                self._socket.shutdown(socket.SHUT_RDWR)
            except EnvironmentError:
                # NB: sometimes these exceptions are raised if the initial
                # connection didn't succeed correctly, or if shutdown() is
                # called after the connection dies
                logging.debug('Exception occurred while shutting '
                              'down socket.', exc_info=True)
        self._socket.close()
        # Drop the attribute entirely so later use fails loudly.
        del self._socket
[ "\n Closes the underlying socket of the PB connection.\n " ]
def content_property(name, doc=None):
    """
    Builds a property that delegates attribute *name* to the first
    sibling in a RiakObject, raising an error when the object is in
    conflict.

    :param name: the sibling attribute to delegate to
    :type name: string
    :param doc: optional docstring for the property
    :type doc: string, None
    :rtype: property
    """
    def _get(obj):
        sibling_count = len(obj.siblings)
        if sibling_count == 0:
            return None
        if sibling_count != 1:
            raise ConflictError()
        return getattr(obj.siblings[0], name)

    def _set(obj, value):
        if not obj.siblings:
            # Writing to an empty object implicitly creates a single
            # new sibling to hold the value.
            obj.siblings = [RiakContent(obj)]
        if len(obj.siblings) != 1:
            raise ConflictError()
        setattr(obj.siblings[0], name, value)

    return property(_get, _set, doc=doc)
[ "\n Delegates a property to the first sibling in a RiakObject, raising\n an error when the object is in conflict.\n " ]
def content_method(name):
    """
    Builds a method that delegates *name* to the first sibling in a
    RiakObject, raising an error when the object is in conflict.

    :param name: the sibling method to delegate to
    :type name: string
    :rtype: function
    """
    def _delegate(self, *args, **kwargs):
        if len(self.siblings) != 1:
            raise ConflictError()
        target = getattr(self.siblings[0], name)
        return target(*args, **kwargs)
    # Borrow the docstring from the delegated RiakContent method.
    _delegate.__doc__ = getattr(RiakContent, name).__doc__
    return _delegate
[ "\n Delegates a method to the first sibling in a RiakObject, raising\n an error when the object is in conflict.\n " ]
def store(self, w=None, dw=None, pw=None, return_body=True,
          if_none_match=False, timeout=None):
    """
    Store the object in Riak. When this operation completes, the
    object could contain new metadata and possibly new data if Riak
    contains a newer version of the object according to the object's
    vector clock.

    :param w: W-value, wait for this many partitions to respond
        before returning to client.
    :type w: integer
    :param dw: DW-value, wait for this many partitions to confirm the
        write before returning to client.
    :type dw: integer
    :param pw: PW-value, require this many primary partitions to be
        available before performing the put
    :type pw: integer
    :param return_body: if the newly stored object should be retrieved
    :type return_body: bool
    :param if_none_match: Should the object be stored only if there is
        no key previously defined
    :type if_none_match: bool
    :param timeout: a timeout value in milliseconds
    :type timeout: int
    :rtype: :class:`RiakObject`
    """
    # Refuse to store an object whose siblings are unresolved.
    sibling_count = len(self.siblings)
    if sibling_count != 1:
        raise ConflictError("Attempting to store an invalid object, "
                            "resolve the siblings first")
    self.client.put(self, w=w, dw=dw, pw=pw,
                    return_body=return_body,
                    if_none_match=if_none_match,
                    timeout=timeout)
    return self
[ "\n Store the object in Riak. When this operation completes, the\n object could contain new metadata and possibly new data if Riak\n contains a newer version of the object according to the object's\n vector clock.\n\n :param w: W-value, wait for this many partitions to respond\n before returning to client.\n :type w: integer\n :param dw: DW-value, wait for this many partitions to\n confirm the write before returning to client.\n :type dw: integer\n\n :param pw: PW-value, require this many primary partitions to\n be available before performing the put\n :type pw: integer\n :param return_body: if the newly stored object should be\n retrieved\n :type return_body: bool\n :param if_none_match: Should the object be stored only if\n there is no key previously defined\n :type if_none_match: bool\n :param timeout: a timeout value in milliseconds\n :type timeout: int\n :rtype: :class:`RiakObject` " ]
def reload(self, r=None, pr=None, timeout=None, basic_quorum=None,
           notfound_ok=None, head_only=False):
    """
    Reload the object from Riak. When this operation completes, the
    object could contain new metadata and a new value, if the object
    was updated in Riak since it was last retrieved.

    .. note:: Even if the key is not found in Riak, this will
       return a :class:`RiakObject`. Check the :attr:`exists`
       property to see if the key was found.

    :param r: R-Value, wait for this many partitions to respond
        before returning to client.
    :type r: integer
    :param pr: PR-value, require this many primary partitions to
        be available before performing the read
    :type pr: integer
    :param timeout: a timeout value in milliseconds
    :type timeout: int
    :param basic_quorum: whether to use the "basic quorum" policy
        for not-founds
    :type basic_quorum: bool
    :param notfound_ok: whether to treat not-found responses as successful
    :type notfound_ok: bool
    :param head_only: whether to fetch without value, so only metadata
        (only available on PB transport)
    :type head_only: bool
    :rtype: :class:`RiakObject`
    """
    # NOTE(review): basic_quorum and notfound_ok are accepted but not
    # forwarded to client.get() here -- confirm whether that is
    # intentional or whether they should be passed through.
    self.client.get(self, r=r, pr=pr, timeout=timeout,
                    head_only=head_only)
    return self
[ "\n Reload the object from Riak. When this operation completes, the\n object could contain new metadata and a new value, if the object\n was updated in Riak since it was last retrieved.\n\n .. note:: Even if the key is not found in Riak, this will\n return a :class:`RiakObject`. Check the :attr:`exists`\n property to see if the key was found.\n\n :param r: R-Value, wait for this many partitions to respond\n before returning to client.\n :type r: integer\n :param pr: PR-value, require this many primary partitions to\n be available before performing the read that\n precedes the put\n :type pr: integer\n :param timeout: a timeout value in milliseconds\n :type timeout: int\n :param basic_quorum: whether to use the \"basic quorum\" policy\n for not-founds\n :type basic_quorum: bool\n :param notfound_ok: whether to treat not-found responses as successful\n :type notfound_ok: bool\n :param head_only: whether to fetch without value, so only metadata\n (only available on PB transport)\n :type head_only: bool\n :rtype: :class:`RiakObject`\n " ]
def delete(self, r=None, w=None, dw=None, pr=None, pw=None,
           timeout=None):
    """
    Delete this object from Riak, then clear its local state.

    :param r: R-value, wait for this many partitions to read object
        before performing the put
    :type r: integer
    :param w: W-value, wait for this many partitions to respond
        before returning to client.
    :type w: integer
    :param dw: DW-value, wait for this many partitions to confirm the
        write before returning to client.
    :type dw: integer
    :param pr: PR-value, require this many primary partitions to be
        available before performing the read that precedes the put
    :type pr: integer
    :param pw: PW-value, require this many primary partitions to be
        available before performing the put
    :type pw: integer
    :param timeout: a timeout value in milliseconds
    :type timeout: int
    :rtype: :class:`RiakObject`
    """
    quorum_opts = dict(r=r, w=w, dw=dw, pr=pr, pw=pw, timeout=timeout)
    self.client.delete(self, **quorum_opts)
    # Reset local state so the object reflects the deletion.
    self.clear()
    return self
[ "\n Delete this object from Riak.\n\n :param r: R-value, wait for this many partitions to read object\n before performing the put\n :type r: integer\n :param w: W-value, wait for this many partitions to respond\n before returning to client.\n :type w: integer\n :param dw: DW-value, wait for this many partitions to\n confirm the write before returning to client.\n :type dw: integer\n :param pr: PR-value, require this many primary partitions to\n be available before performing the read that\n precedes the put\n :type pr: integer\n :param pw: PW-value, require this many primary partitions to\n be available before performing the put\n :type pw: integer\n :param timeout: a timeout value in milliseconds\n :type timeout: int\n :rtype: :class:`RiakObject`\n " ]
def add(self, arg1, arg2=None, arg3=None, bucket_type=None):
    """
    Start assembling a Map/Reduce operation.
    A shortcut for :meth:`~riak.mapreduce.RiakMapReduce.add`.

    :param arg1: the object or bucket to add
    :type arg1: RiakObject, string
    :param arg2: a key or list of keys to add (if a bucket is
        given in arg1)
    :type arg2: string, list, None
    :param arg3: key data for this input (must be convertible to JSON)
    :type arg3: string, list, dict, None
    :param bucket_type: Optional name of a bucket type
    :type bucket_type: string, None
    :rtype: :class:`~riak.mapreduce.RiakMapReduce`
    """
    job = RiakMapReduce(self.client)
    # Seed the job with this object as its first input, then delegate
    # the caller's arguments to the job's own add().
    job.add(self.bucket.name, self.key, bucket_type=bucket_type)
    return job.add(arg1, arg2, arg3, bucket_type)
[ "\n Start assembling a Map/Reduce operation.\n A shortcut for :meth:`~riak.mapreduce.RiakMapReduce.add`.\n\n :param arg1: the object or bucket to add\n :type arg1: RiakObject, string\n :param arg2: a key or list of keys to add (if a bucket is\n given in arg1)\n :type arg2: string, list, None\n :param arg3: key data for this input (must be convertible to JSON)\n :type arg3: string, list, dict, None\n :param bucket_type: Optional name of a bucket type\n :type bucket_type: string, None\n :rtype: :class:`~riak.mapreduce.RiakMapReduce`\n " ]