repository_name
stringlengths
7
55
func_path_in_repository
stringlengths
4
223
func_name
stringlengths
1
134
whole_func_string
stringlengths
75
104k
language
stringclasses
1 value
func_code_string
stringlengths
75
104k
func_code_tokens
sequencelengths
19
28.4k
func_documentation_string
stringlengths
1
46.9k
func_documentation_tokens
sequencelengths
1
1.97k
split_name
stringclasses
1 value
func_code_url
stringlengths
87
315
Unidata/siphon
siphon/ncss.py
combine_xml_points
def combine_xml_points(l, units, handle_units):
    """Merge a sequence of Point-tag dicts into a single dict of arrays."""
    merged = {}
    # Collect the values for each field across all points, in order.
    for point in l:
        for field, val in point.items():
            merged.setdefault(field, []).append(val)
    # Attach units to every field except the date column.
    for field in merged:
        if field != 'date':
            merged[field] = handle_units(merged[field], units.get(field))
    return merged
python
def combine_xml_points(l, units, handle_units): """Combine multiple Point tags into an array.""" ret = {} for item in l: for key, value in item.items(): ret.setdefault(key, []).append(value) for key, value in ret.items(): if key != 'date': ret[key] = handle_units(value, units.get(key, None)) return ret
[ "def", "combine_xml_points", "(", "l", ",", "units", ",", "handle_units", ")", ":", "ret", "=", "{", "}", "for", "item", "in", "l", ":", "for", "key", ",", "value", "in", "item", ".", "items", "(", ")", ":", "ret", ".", "setdefault", "(", "key", ",", "[", "]", ")", ".", "append", "(", "value", ")", "for", "key", ",", "value", "in", "ret", ".", "items", "(", ")", ":", "if", "key", "!=", "'date'", ":", "ret", "[", "key", "]", "=", "handle_units", "(", "value", ",", "units", ".", "get", "(", "key", ",", "None", ")", ")", "return", "ret" ]
Combine multiple Point tags into an array.
[ "Combine", "multiple", "Point", "tags", "into", "an", "array", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/ncss.py#L334-L345
Unidata/siphon
siphon/ncss.py
parse_xml_dataset
def parse_xml_dataset(elem, handle_units):
    """Create a netCDF-like dataset from XML data."""
    parsed = [parse_xml_point(p) for p in elem.findall('point')]
    points, units = zip(*parsed)

    # Bucket the points by their set of fields so that points with the
    # same structure end up combined into the same dataset.
    datasets = {}
    for point in points:
        datasets.setdefault(tuple(point), []).append(point)

    all_units = combine_dicts(units)
    return [combine_xml_points(group, all_units, handle_units)
            for group in datasets.values()]
python
def parse_xml_dataset(elem, handle_units): """Create a netCDF-like dataset from XML data.""" points, units = zip(*[parse_xml_point(p) for p in elem.findall('point')]) # Group points by the contents of each point datasets = {} for p in points: datasets.setdefault(tuple(p), []).append(p) all_units = combine_dicts(units) return [combine_xml_points(d, all_units, handle_units) for d in datasets.values()]
[ "def", "parse_xml_dataset", "(", "elem", ",", "handle_units", ")", ":", "points", ",", "units", "=", "zip", "(", "*", "[", "parse_xml_point", "(", "p", ")", "for", "p", "in", "elem", ".", "findall", "(", "'point'", ")", "]", ")", "# Group points by the contents of each point", "datasets", "=", "{", "}", "for", "p", "in", "points", ":", "datasets", ".", "setdefault", "(", "tuple", "(", "p", ")", ",", "[", "]", ")", ".", "append", "(", "p", ")", "all_units", "=", "combine_dicts", "(", "units", ")", "return", "[", "combine_xml_points", "(", "d", ",", "all_units", ",", "handle_units", ")", "for", "d", "in", "datasets", ".", "values", "(", ")", "]" ]
Create a netCDF-like dataset from XML data.
[ "Create", "a", "netCDF", "-", "like", "dataset", "from", "XML", "data", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/ncss.py#L348-L357
Unidata/siphon
siphon/ncss.py
parse_csv_response
def parse_csv_response(data, unit_handler):
    """Handle CSV-formatted HTTP responses."""
    # Blank lines separate independent CSV datasets in the response body.
    datasets = [parse_csv_dataset(block, unit_handler)
                for block in data.split(b'\n\n')]
    return squish(datasets)
python
def parse_csv_response(data, unit_handler): """Handle CSV-formatted HTTP responses.""" return squish([parse_csv_dataset(d, unit_handler) for d in data.split(b'\n\n')])
[ "def", "parse_csv_response", "(", "data", ",", "unit_handler", ")", ":", "return", "squish", "(", "[", "parse_csv_dataset", "(", "d", ",", "unit_handler", ")", "for", "d", "in", "data", ".", "split", "(", "b'\\n\\n'", ")", "]", ")" ]
Handle CSV-formatted HTTP responses.
[ "Handle", "CSV", "-", "formatted", "HTTP", "responses", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/ncss.py#L403-L405
Unidata/siphon
siphon/ncss.py
parse_csv_header
def parse_csv_header(line):
    """Parse the CSV header returned by TDS.

    Returns the list of variable names and a dict mapping a name to the
    unit string embedded as ``name[unit="..."]`` in the header line.
    """
    units = {}
    names = []
    for var in line.split(','):
        bracket = var.find('[')
        if bracket < 0:
            # No unit specification attached to this variable.
            names.append(str(var))
            continue
        names.append(str(var[:bracket]))
        close = var.find(']', bracket)
        unitstr = var[bracket + 1:close]
        eq = unitstr.find('=')
        if eq >= 0:
            # go past = and ", skip final "
            units[names[-1]] = unitstr[eq + 2:-1]
    return names, units
python
def parse_csv_header(line): """Parse the CSV header returned by TDS.""" units = {} names = [] for var in line.split(','): start = var.find('[') if start < 0: names.append(str(var)) continue else: names.append(str(var[:start])) end = var.find(']', start) unitstr = var[start + 1:end] eq = unitstr.find('=') if eq >= 0: # go past = and ", skip final " units[names[-1]] = unitstr[eq + 2:-1] return names, units
[ "def", "parse_csv_header", "(", "line", ")", ":", "units", "=", "{", "}", "names", "=", "[", "]", "for", "var", "in", "line", ".", "split", "(", "','", ")", ":", "start", "=", "var", ".", "find", "(", "'['", ")", "if", "start", "<", "0", ":", "names", ".", "append", "(", "str", "(", "var", ")", ")", "continue", "else", ":", "names", ".", "append", "(", "str", "(", "var", "[", ":", "start", "]", ")", ")", "end", "=", "var", ".", "find", "(", "']'", ",", "start", ")", "unitstr", "=", "var", "[", "start", "+", "1", ":", "end", "]", "eq", "=", "unitstr", ".", "find", "(", "'='", ")", "if", "eq", ">=", "0", ":", "# go past = and \", skip final \"", "units", "[", "names", "[", "-", "1", "]", "]", "=", "unitstr", "[", "eq", "+", "2", ":", "-", "1", "]", "return", "names", ",", "units" ]
Parse the CSV header returned by TDS.
[ "Parse", "the", "CSV", "header", "returned", "by", "TDS", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/ncss.py#L408-L425
Unidata/siphon
siphon/ncss.py
parse_csv_dataset
def parse_csv_dataset(data, handle_units):
    """Parse CSV data into a netCDF-like dataset.

    Parameters
    ----------
    data : bytes
        The raw CSV payload, with the TDS header as the first line
    handle_units : callable
        Called as ``handle_units(values, unit_string)`` to attach units
        to each column's data

    Returns
    -------
    dict
        Mapping of column name to unit-handled data
    """
    fobj = BytesIO(data)
    names, units = parse_csv_header(fobj.readline().decode('utf-8'))
    arrs = np.genfromtxt(fobj, dtype=None, names=names, delimiter=',',
                         unpack=True,
                         converters={'date': lambda s: parse_iso_date(s.decode('utf-8'))})
    d = {}
    for f in arrs.dtype.fields:
        dat = arrs[f]
        # Object columns (e.g. parsed dates) become plain Python lists.
        # Compare against the builtin ``object``: the ``np.object`` alias
        # was deprecated in NumPy 1.20 and removed in 1.24, so the old
        # comparison raises AttributeError on modern NumPy.
        if dat.dtype == object:
            dat = dat.tolist()
        d[f] = handle_units(dat, units.get(f, None))
    return d
python
def parse_csv_dataset(data, handle_units): """Parse CSV data into a netCDF-like dataset.""" fobj = BytesIO(data) names, units = parse_csv_header(fobj.readline().decode('utf-8')) arrs = np.genfromtxt(fobj, dtype=None, names=names, delimiter=',', unpack=True, converters={'date': lambda s: parse_iso_date(s.decode('utf-8'))}) d = {} for f in arrs.dtype.fields: dat = arrs[f] if dat.dtype == np.object: dat = dat.tolist() d[f] = handle_units(dat, units.get(f, None)) return d
[ "def", "parse_csv_dataset", "(", "data", ",", "handle_units", ")", ":", "fobj", "=", "BytesIO", "(", "data", ")", "names", ",", "units", "=", "parse_csv_header", "(", "fobj", ".", "readline", "(", ")", ".", "decode", "(", "'utf-8'", ")", ")", "arrs", "=", "np", ".", "genfromtxt", "(", "fobj", ",", "dtype", "=", "None", ",", "names", "=", "names", ",", "delimiter", "=", "','", ",", "unpack", "=", "True", ",", "converters", "=", "{", "'date'", ":", "lambda", "s", ":", "parse_iso_date", "(", "s", ".", "decode", "(", "'utf-8'", ")", ")", "}", ")", "d", "=", "{", "}", "for", "f", "in", "arrs", ".", "dtype", ".", "fields", ":", "dat", "=", "arrs", "[", "f", "]", "if", "dat", ".", "dtype", "==", "np", ".", "object", ":", "dat", "=", "dat", ".", "tolist", "(", ")", "d", "[", "f", "]", "=", "handle_units", "(", "dat", ",", "units", ".", "get", "(", "f", ",", "None", ")", ")", "return", "d" ]
Parse CSV data into a netCDF-like dataset.
[ "Parse", "CSV", "data", "into", "a", "netCDF", "-", "like", "dataset", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/ncss.py#L428-L440
Unidata/siphon
siphon/ncss.py
NCSS.validate_query
def validate_query(self, query):
    """Validate a query.

    Determines whether `query` is well-formed. This includes checking for
    all required parameters, as well as checking parameters for valid
    values.

    Parameters
    ----------
    query : NCSSQuery
        The query to validate

    Returns
    -------
    valid : bool
        Whether `query` is valid.

    """
    # A query is valid only if it requests at least one variable and
    # every requested variable exists in this dataset.
    requested = query.var
    if not requested:
        return False
    return all(name in self.variables for name in requested)
python
def validate_query(self, query): """Validate a query. Determines whether `query` is well-formed. This includes checking for all required parameters, as well as checking parameters for valid values. Parameters ---------- query : NCSSQuery The query to validate Returns ------- valid : bool Whether `query` is valid. """ # Make sure all variables are in the dataset return bool(query.var) and all(var in self.variables for var in query.var)
[ "def", "validate_query", "(", "self", ",", "query", ")", ":", "# Make sure all variables are in the dataset", "return", "bool", "(", "query", ".", "var", ")", "and", "all", "(", "var", "in", "self", ".", "variables", "for", "var", "in", "query", ".", "var", ")" ]
Validate a query. Determines whether `query` is well-formed. This includes checking for all required parameters, as well as checking parameters for valid values. Parameters ---------- query : NCSSQuery The query to validate Returns ------- valid : bool Whether `query` is valid.
[ "Validate", "a", "query", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/ncss.py#L73-L91
Unidata/siphon
siphon/ncss.py
NCSS.get_data
def get_data(self, query):
    """Fetch parsed data from a THREDDS server using NCSS.

    Sends the parameters in `query` to the NCSS endpoint and parses the
    returned content based on its mimetype.

    Parameters
    ----------
    query : NCSSQuery
        The parameters to send to the NCSS endpoint

    Returns
    -------
    Parsed data response from the server. Exact format depends on the
    format of the response.

    See Also
    --------
    get_data_raw

    """
    return response_handlers(self.get_query(query), self.unit_handler)
python
def get_data(self, query): """Fetch parsed data from a THREDDS server using NCSS. Requests data from the NCSS endpoint given the parameters in `query` and handles parsing of the returned content based on the mimetype. Parameters ---------- query : NCSSQuery The parameters to send to the NCSS endpoint Returns ------- Parsed data response from the server. Exact format depends on the format of the response. See Also -------- get_data_raw """ resp = self.get_query(query) return response_handlers(resp, self.unit_handler)
[ "def", "get_data", "(", "self", ",", "query", ")", ":", "resp", "=", "self", ".", "get_query", "(", "query", ")", "return", "response_handlers", "(", "resp", ",", "self", ".", "unit_handler", ")" ]
Fetch parsed data from a THREDDS server using NCSS. Requests data from the NCSS endpoint given the parameters in `query` and handles parsing of the returned content based on the mimetype. Parameters ---------- query : NCSSQuery The parameters to send to the NCSS endpoint Returns ------- Parsed data response from the server. Exact format depends on the format of the response. See Also -------- get_data_raw
[ "Fetch", "parsed", "data", "from", "a", "THREDDS", "server", "using", "NCSS", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/ncss.py#L93-L115
Unidata/siphon
siphon/ncss.py
NCSSQuery.projection_box
def projection_box(self, min_x, min_y, max_x, max_y):
    """Add a bounding box in projected (native) coordinates to the query.

    Requests a spatial bounding box spanning (`min_x`, `max_x`) in the
    x direction and (`min_y`, `max_y`) in the y direction. The query is
    modified in-place, and ``self`` is returned so that calls can be
    chained on one line. Any previously set spatial query is replaced.

    Parameters
    ----------
    min_x : float
        The left edge of the bounding box
    min_y : float
        The bottom edge of the bounding box
    max_x : float
        The right edge of the bounding box
    max_y : float
        The top edge of the bounding box

    Returns
    -------
    self : NCSSQuery
        Returns self for chaining calls

    """
    # Keyword order matters for how the query string is assembled.
    box = {'minx': min_x, 'miny': min_y, 'maxx': max_x, 'maxy': max_y}
    self._set_query(self.spatial_query, **box)
    return self
python
def projection_box(self, min_x, min_y, max_x, max_y): """Add a bounding box in projected (native) coordinates to the query. This adds a request for a spatial bounding box, bounded by (`min_x`, `max_x`) for x direction and (`min_y`, `max_y`) for the y direction. This modifies the query in-place, but returns ``self`` so that multiple queries can be chained together on one line. This replaces any existing spatial queries that have been set. Parameters ---------- min_x : float The left edge of the bounding box min_y : float The bottom edge of the bounding box max_x : float The right edge of the bounding box max_y: float The top edge of the bounding box Returns ------- self : NCSSQuery Returns self for chaining calls """ self._set_query(self.spatial_query, minx=min_x, miny=min_y, maxx=max_x, maxy=max_y) return self
[ "def", "projection_box", "(", "self", ",", "min_x", ",", "min_y", ",", "max_x", ",", "max_y", ")", ":", "self", ".", "_set_query", "(", "self", ".", "spatial_query", ",", "minx", "=", "min_x", ",", "miny", "=", "min_y", ",", "maxx", "=", "max_x", ",", "maxy", "=", "max_y", ")", "return", "self" ]
Add a bounding box in projected (native) coordinates to the query. This adds a request for a spatial bounding box, bounded by (`min_x`, `max_x`) for x direction and (`min_y`, `max_y`) for the y direction. This modifies the query in-place, but returns ``self`` so that multiple queries can be chained together on one line. This replaces any existing spatial queries that have been set. Parameters ---------- min_x : float The left edge of the bounding box min_y : float The bottom edge of the bounding box max_x : float The right edge of the bounding box max_y: float The top edge of the bounding box Returns ------- self : NCSSQuery Returns self for chaining calls
[ "Add", "a", "bounding", "box", "in", "projected", "(", "native", ")", "coordinates", "to", "the", "query", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/ncss.py#L148-L177
Unidata/siphon
siphon/ncss.py
NCSSQuery.strides
def strides(self, time=None, spatial=None):
    """Set time and/or spatial (horizontal) strides.

    Only used on grid requests; strides skip points in the returned
    data. The query is modified in-place, and `self` is returned so
    that calls can be chained on one line.

    Parameters
    ----------
    time : int, optional
        Stride for times returned. Defaults to None, which is
        equivalent to 1.
    spatial : int, optional
        Stride for horizontal grid. Defaults to None, which is
        equivalent to 1.

    Returns
    -------
    self : NCSSQuery
        Returns self for chaining calls

    """
    # Only add a parameter when a (truthy) stride was supplied.
    for param, value in (('timeStride', time), ('horizStride', spatial)):
        if value:
            self.add_query_parameter(**{param: value})
    return self
python
def strides(self, time=None, spatial=None): """Set time and/or spatial (horizontal) strides. This is only used on grid requests. Used to skip points in the returned data. This modifies the query in-place, but returns `self` so that multiple queries can be chained together on one line. Parameters ---------- time : int, optional Stride for times returned. Defaults to None, which is equivalent to 1. spatial : int, optional Stride for horizontal grid. Defaults to None, which is equivalent to 1. Returns ------- self : NCSSQuery Returns self for chaining calls """ if time: self.add_query_parameter(timeStride=time) if spatial: self.add_query_parameter(horizStride=spatial) return self
[ "def", "strides", "(", "self", ",", "time", "=", "None", ",", "spatial", "=", "None", ")", ":", "if", "time", ":", "self", ".", "add_query_parameter", "(", "timeStride", "=", "time", ")", "if", "spatial", ":", "self", ".", "add_query_parameter", "(", "horizStride", "=", "spatial", ")", "return", "self" ]
Set time and/or spatial (horizontal) strides. This is only used on grid requests. Used to skip points in the returned data. This modifies the query in-place, but returns `self` so that multiple queries can be chained together on one line. Parameters ---------- time : int, optional Stride for times returned. Defaults to None, which is equivalent to 1. spatial : int, optional Stride for horizontal grid. Defaults to None, which is equivalent to 1. Returns ------- self : NCSSQuery Returns self for chaining calls
[ "Set", "time", "and", "/", "or", "spatial", "(", "horizontal", ")", "strides", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/ncss.py#L218-L242
Unidata/siphon
siphon/ncss.py
ResponseRegistry.register
def register(self, mimetype):
    """Register a function to handle a particular mimetype.

    Intended for use as a decorator: the wrapped function is stored in
    the registry under `mimetype` and returned unchanged.
    """
    def decorator(func):
        self._reg[mimetype] = func
        return func
    return decorator
python
def register(self, mimetype): """Register a function to handle a particular mimetype.""" def dec(func): self._reg[mimetype] = func return func return dec
[ "def", "register", "(", "self", ",", "mimetype", ")", ":", "def", "dec", "(", "func", ")", ":", "self", ".", "_reg", "[", "mimetype", "]", "=", "func", "return", "func", "return", "dec" ]
Register a function to handle a particular mimetype.
[ "Register", "a", "function", "to", "handle", "a", "particular", "mimetype", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/ncss.py#L279-L284
Unidata/siphon
siphon/ncss_dataset.py
_Types.handle_typed_values
def handle_typed_values(val, type_name, value_type):
    """Translate typed values into the appropriate python object.

    Takes an element name, value, and type and returns a list
    with the string value(s) properly converted to a python type.

    TypedValues are handled in ucar.ma2.DataType in netcdfJava
    in the DataType enum. Possibilities are:

        "boolean" "byte" "char" "short" "int" "long" "float" "double"
        "Sequence" "String" "Structure" "enum1" "enum2" "enum4" "opaque"
        "object"

    All of these are values written as strings in the xml, so simply
    applying int, float to the values will work in most cases (i.e.
    the TDS encodes them as string values properly).

    Example XML element:
        <attribute name="scale_factor" type="double" value="0.0010000000474974513"/>

    Parameters
    ----------
    val : string
        The string representation of the value attribute of the xml element
    type_name : string
        The string representation of the name attribute of the xml element
    value_type : string
        The string representation of the type attribute of the xml element

    Returns
    -------
    val : list
        A list containing the properly typed python values.

    """
    if value_type in ('byte', 'short', 'int', 'long'):
        try:
            val = [int(piece) for piece in re.split('[ ,]', val) if piece]
        except ValueError:
            log.warning('Cannot convert "%s" to int. Keeping type as str.', val)
    elif value_type in ('float', 'double'):
        try:
            val = [float(piece) for piece in re.split('[ ,]', val) if piece]
        except ValueError:
            log.warning('Cannot convert "%s" to float. Keeping type as str.', val)
    elif value_type == 'boolean':
        try:
            # special case for boolean type
            val = val.split()
            # values must be either true or false
            for potential_bool in val:
                if potential_bool not in ('true', 'false'):
                    raise ValueError
            val = [item == 'true' for item in val]
        except ValueError:
            msg = 'Cannot convert values %s to boolean.'
            msg += ' Keeping type as str.'
            log.warning(msg, val)
    elif value_type == 'String':
        # nothing special for String type
        pass
    else:
        # possibilities - Sequence, Structure, enum, opaque, object,
        # and char.
        # Not sure how to handle these as I do not have an example
        # of how they would show up in dataset.xml
        log.warning('%s type %s not understood. Keeping as String.',
                    type_name, value_type)

    # Normalize: the caller always receives a list.
    if not isinstance(val, list):
        val = [val]

    return val
python
def handle_typed_values(val, type_name, value_type): """Translate typed values into the appropriate python object. Takes an element name, value, and type and returns a list with the string value(s) properly converted to a python type. TypedValues are handled in ucar.ma2.DataType in netcdfJava in the DataType enum. Possibilities are: "boolean" "byte" "char" "short" "int" "long" "float" "double" "Sequence" "String" "Structure" "enum1" "enum2" "enum4" "opaque" "object" All of these are values written as strings in the xml, so simply applying int, float to the values will work in most cases (i.e. the TDS encodes them as string values properly). Examle XML element: <attribute name="scale_factor" type="double" value="0.0010000000474974513"/> Parameters ---------- val : string The string representation of the value attribute of the xml element type_name : string The string representation of the name attribute of the xml element value_type : string The string representation of the type attribute of the xml element Returns ------- val : list A list containing the properly typed python values. """ if value_type in ['byte', 'short', 'int', 'long']: try: val = [int(v) for v in re.split('[ ,]', val) if v] except ValueError: log.warning('Cannot convert "%s" to int. Keeping type as str.', val) elif value_type in ['float', 'double']: try: val = [float(v) for v in re.split('[ ,]', val) if v] except ValueError: log.warning('Cannot convert "%s" to float. Keeping type as str.', val) elif value_type == 'boolean': try: # special case for boolean type val = val.split() # values must be either true or false for potential_bool in val: if potential_bool not in ['true', 'false']: raise ValueError val = [True if item == 'true' else False for item in val] except ValueError: msg = 'Cannot convert values %s to boolean.' msg += ' Keeping type as str.' 
log.warning(msg, val) elif value_type == 'String': # nothing special for String type pass else: # possibilities - Sequence, Structure, enum, opaque, object, # and char. # Not sure how to handle these as I do not have an example # of how they would show up in dataset.xml log.warning('%s type %s not understood. Keeping as String.', type_name, value_type) if not isinstance(val, list): val = [val] return val
[ "def", "handle_typed_values", "(", "val", ",", "type_name", ",", "value_type", ")", ":", "if", "value_type", "in", "[", "'byte'", ",", "'short'", ",", "'int'", ",", "'long'", "]", ":", "try", ":", "val", "=", "[", "int", "(", "v", ")", "for", "v", "in", "re", ".", "split", "(", "'[ ,]'", ",", "val", ")", "if", "v", "]", "except", "ValueError", ":", "log", ".", "warning", "(", "'Cannot convert \"%s\" to int. Keeping type as str.'", ",", "val", ")", "elif", "value_type", "in", "[", "'float'", ",", "'double'", "]", ":", "try", ":", "val", "=", "[", "float", "(", "v", ")", "for", "v", "in", "re", ".", "split", "(", "'[ ,]'", ",", "val", ")", "if", "v", "]", "except", "ValueError", ":", "log", ".", "warning", "(", "'Cannot convert \"%s\" to float. Keeping type as str.'", ",", "val", ")", "elif", "value_type", "==", "'boolean'", ":", "try", ":", "# special case for boolean type", "val", "=", "val", ".", "split", "(", ")", "# values must be either true or false", "for", "potential_bool", "in", "val", ":", "if", "potential_bool", "not", "in", "[", "'true'", ",", "'false'", "]", ":", "raise", "ValueError", "val", "=", "[", "True", "if", "item", "==", "'true'", "else", "False", "for", "item", "in", "val", "]", "except", "ValueError", ":", "msg", "=", "'Cannot convert values %s to boolean.'", "msg", "+=", "' Keeping type as str.'", "log", ".", "warning", "(", "msg", ",", "val", ")", "elif", "value_type", "==", "'String'", ":", "# nothing special for String type", "pass", "else", ":", "# possibilities - Sequence, Structure, enum, opaque, object,", "# and char.", "# Not sure how to handle these as I do not have an example", "# of how they would show up in dataset.xml", "log", ".", "warning", "(", "'%s type %s not understood. Keeping as String.'", ",", "type_name", ",", "value_type", ")", "if", "not", "isinstance", "(", "val", ",", "list", ")", ":", "val", "=", "[", "val", "]", "return", "val" ]
Translate typed values into the appropriate python object. Takes an element name, value, and type and returns a list with the string value(s) properly converted to a python type. TypedValues are handled in ucar.ma2.DataType in netcdfJava in the DataType enum. Possibilities are: "boolean" "byte" "char" "short" "int" "long" "float" "double" "Sequence" "String" "Structure" "enum1" "enum2" "enum4" "opaque" "object" All of these are values written as strings in the xml, so simply applying int, float to the values will work in most cases (i.e. the TDS encodes them as string values properly). Examle XML element: <attribute name="scale_factor" type="double" value="0.0010000000474974513"/> Parameters ---------- val : string The string representation of the value attribute of the xml element type_name : string The string representation of the name attribute of the xml element value_type : string The string representation of the type attribute of the xml element Returns ------- val : list A list containing the properly typed python values.
[ "Translate", "typed", "values", "into", "the", "appropriate", "python", "object", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/ncss_dataset.py#L26-L114
Unidata/siphon
siphon/simplewebservice/wyoming.py
WyomingUpperAir._get_data
def _get_data(self, time, site_id):
    r"""Download and parse upper air observations from an online archive.

    Parameters
    ----------
    time : datetime
        The date and time of the desired observation.
    site_id : str
        The three letter ICAO identifier of the station for which data should
        be downloaded.

    Returns
    -------
    :class:`pandas.DataFrame` containing the data

    """
    raw_data = self._get_data_raw(time, site_id)
    soup = BeautifulSoup(raw_data, 'html.parser')
    # The first <pre> block on the page holds the fixed-width sounding table.
    tabular_data = StringIO(soup.find_all('pre')[0].contents[0])
    col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
    # Skip the 5 header lines of the table; columns 4 and 5 are unused here.
    df = pd.read_fwf(tabular_data, skiprows=5,
                     usecols=[0, 1, 2, 3, 6, 7], names=col_names)
    df['u_wind'], df['v_wind'] = get_wind_components(df['speed'],
                                                     np.deg2rad(df['direction']))

    # Drop any rows with all NaN values for T, Td, winds
    df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
                           'u_wind', 'v_wind'), how='all').reset_index(drop=True)

    # Parse metadata
    # The second <pre> block holds station metadata as "key: value" lines.
    meta_data = soup.find_all('pre')[1].contents[0]
    lines = meta_data.splitlines()

    # If the station doesn't have a name identified we need to insert a
    # record showing this for parsing to proceed.
    if 'Station number' in lines[1]:
        lines.insert(1, 'Station identifier: ')

    station = lines[1].split(':')[1].strip()
    station_number = int(lines[2].split(':')[1].strip())
    # Observation time on the page is encoded as YYMMDD/HHMM.
    sounding_time = datetime.strptime(lines[3].split(':')[1].strip(), '%y%m%d/%H%M')
    latitude = float(lines[4].split(':')[1].strip())
    longitude = float(lines[5].split(':')[1].strip())
    elevation = float(lines[6].split(':')[1].strip())

    # Broadcast the per-sounding metadata onto every row of the frame.
    df['station'] = station
    df['station_number'] = station_number
    df['time'] = sounding_time
    df['latitude'] = latitude
    df['longitude'] = longitude
    df['elevation'] = elevation

    # Add unit dictionary
    # NOTE(review): set as a plain attribute, not a column; pandas does not
    # propagate such attributes through most operations — callers must read
    # it immediately.
    df.units = {'pressure': 'hPa',
                'height': 'meter',
                'temperature': 'degC',
                'dewpoint': 'degC',
                'direction': 'degrees',
                'speed': 'knot',
                'u_wind': 'knot',
                'v_wind': 'knot',
                'station': None,
                'station_number': None,
                'time': None,
                'latitude': 'degrees',
                'longitude': 'degrees',
                'elevation': 'meter'}
    return df
python
def _get_data(self, time, site_id): r"""Download and parse upper air observations from an online archive. Parameters ---------- time : datetime The date and time of the desired observation. site_id : str The three letter ICAO identifier of the station for which data should be downloaded. Returns ------- :class:`pandas.DataFrame` containing the data """ raw_data = self._get_data_raw(time, site_id) soup = BeautifulSoup(raw_data, 'html.parser') tabular_data = StringIO(soup.find_all('pre')[0].contents[0]) col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed'] df = pd.read_fwf(tabular_data, skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names) df['u_wind'], df['v_wind'] = get_wind_components(df['speed'], np.deg2rad(df['direction'])) # Drop any rows with all NaN values for T, Td, winds df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed', 'u_wind', 'v_wind'), how='all').reset_index(drop=True) # Parse metadata meta_data = soup.find_all('pre')[1].contents[0] lines = meta_data.splitlines() # If the station doesn't have a name identified we need to insert a # record showing this for parsing to proceed. 
if 'Station number' in lines[1]: lines.insert(1, 'Station identifier: ') station = lines[1].split(':')[1].strip() station_number = int(lines[2].split(':')[1].strip()) sounding_time = datetime.strptime(lines[3].split(':')[1].strip(), '%y%m%d/%H%M') latitude = float(lines[4].split(':')[1].strip()) longitude = float(lines[5].split(':')[1].strip()) elevation = float(lines[6].split(':')[1].strip()) df['station'] = station df['station_number'] = station_number df['time'] = sounding_time df['latitude'] = latitude df['longitude'] = longitude df['elevation'] = elevation # Add unit dictionary df.units = {'pressure': 'hPa', 'height': 'meter', 'temperature': 'degC', 'dewpoint': 'degC', 'direction': 'degrees', 'speed': 'knot', 'u_wind': 'knot', 'v_wind': 'knot', 'station': None, 'station_number': None, 'time': None, 'latitude': 'degrees', 'longitude': 'degrees', 'elevation': 'meter'} return df
[ "def", "_get_data", "(", "self", ",", "time", ",", "site_id", ")", ":", "raw_data", "=", "self", ".", "_get_data_raw", "(", "time", ",", "site_id", ")", "soup", "=", "BeautifulSoup", "(", "raw_data", ",", "'html.parser'", ")", "tabular_data", "=", "StringIO", "(", "soup", ".", "find_all", "(", "'pre'", ")", "[", "0", "]", ".", "contents", "[", "0", "]", ")", "col_names", "=", "[", "'pressure'", ",", "'height'", ",", "'temperature'", ",", "'dewpoint'", ",", "'direction'", ",", "'speed'", "]", "df", "=", "pd", ".", "read_fwf", "(", "tabular_data", ",", "skiprows", "=", "5", ",", "usecols", "=", "[", "0", ",", "1", ",", "2", ",", "3", ",", "6", ",", "7", "]", ",", "names", "=", "col_names", ")", "df", "[", "'u_wind'", "]", ",", "df", "[", "'v_wind'", "]", "=", "get_wind_components", "(", "df", "[", "'speed'", "]", ",", "np", ".", "deg2rad", "(", "df", "[", "'direction'", "]", ")", ")", "# Drop any rows with all NaN values for T, Td, winds", "df", "=", "df", ".", "dropna", "(", "subset", "=", "(", "'temperature'", ",", "'dewpoint'", ",", "'direction'", ",", "'speed'", ",", "'u_wind'", ",", "'v_wind'", ")", ",", "how", "=", "'all'", ")", ".", "reset_index", "(", "drop", "=", "True", ")", "# Parse metadata", "meta_data", "=", "soup", ".", "find_all", "(", "'pre'", ")", "[", "1", "]", ".", "contents", "[", "0", "]", "lines", "=", "meta_data", ".", "splitlines", "(", ")", "# If the station doesn't have a name identified we need to insert a", "# record showing this for parsing to proceed.", "if", "'Station number'", "in", "lines", "[", "1", "]", ":", "lines", ".", "insert", "(", "1", ",", "'Station identifier: '", ")", "station", "=", "lines", "[", "1", "]", ".", "split", "(", "':'", ")", "[", "1", "]", ".", "strip", "(", ")", "station_number", "=", "int", "(", "lines", "[", "2", "]", ".", "split", "(", "':'", ")", "[", "1", "]", ".", "strip", "(", ")", ")", "sounding_time", "=", "datetime", ".", "strptime", "(", "lines", "[", "3", "]", ".", "split", "(", "':'", ")", "[", 
"1", "]", ".", "strip", "(", ")", ",", "'%y%m%d/%H%M'", ")", "latitude", "=", "float", "(", "lines", "[", "4", "]", ".", "split", "(", "':'", ")", "[", "1", "]", ".", "strip", "(", ")", ")", "longitude", "=", "float", "(", "lines", "[", "5", "]", ".", "split", "(", "':'", ")", "[", "1", "]", ".", "strip", "(", ")", ")", "elevation", "=", "float", "(", "lines", "[", "6", "]", ".", "split", "(", "':'", ")", "[", "1", "]", ".", "strip", "(", ")", ")", "df", "[", "'station'", "]", "=", "station", "df", "[", "'station_number'", "]", "=", "station_number", "df", "[", "'time'", "]", "=", "sounding_time", "df", "[", "'latitude'", "]", "=", "latitude", "df", "[", "'longitude'", "]", "=", "longitude", "df", "[", "'elevation'", "]", "=", "elevation", "# Add unit dictionary", "df", ".", "units", "=", "{", "'pressure'", ":", "'hPa'", ",", "'height'", ":", "'meter'", ",", "'temperature'", ":", "'degC'", ",", "'dewpoint'", ":", "'degC'", ",", "'direction'", ":", "'degrees'", ",", "'speed'", ":", "'knot'", ",", "'u_wind'", ":", "'knot'", ",", "'v_wind'", ":", "'knot'", ",", "'station'", ":", "None", ",", "'station_number'", ":", "None", ",", "'time'", ":", "None", ",", "'latitude'", ":", "'degrees'", ",", "'longitude'", ":", "'degrees'", ",", "'elevation'", ":", "'meter'", "}", "return", "df" ]
r"""Download and parse upper air observations from an online archive. Parameters ---------- time : datetime The date and time of the desired observation. site_id : str The three letter ICAO identifier of the station for which data should be downloaded. Returns ------- :class:`pandas.DataFrame` containing the data
[ "r", "Download", "and", "parse", "upper", "air", "observations", "from", "an", "online", "archive", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/simplewebservice/wyoming.py#L52-L119
Unidata/siphon
siphon/simplewebservice/wyoming.py
WyomingUpperAir._get_data_raw
def _get_data_raw(self, time, site_id): """Download data from the University of Wyoming's upper air archive. Parameters ---------- time : datetime Date and time for which data should be downloaded site_id : str Site id for which data should be downloaded Returns ------- text of the server response """ path = ('?region=naconf&TYPE=TEXT%3ALIST' '&YEAR={time:%Y}&MONTH={time:%m}&FROM={time:%d%H}&TO={time:%d%H}' '&STNM={stid}').format(time=time, stid=site_id) resp = self.get_path(path) # See if the return is valid, but has no data if resp.text.find('Can\'t') != -1: raise ValueError( 'No data available for {time:%Y-%m-%d %HZ} ' 'for station {stid}.'.format(time=time, stid=site_id)) return resp.text
python
def _get_data_raw(self, time, site_id): """Download data from the University of Wyoming's upper air archive. Parameters ---------- time : datetime Date and time for which data should be downloaded site_id : str Site id for which data should be downloaded Returns ------- text of the server response """ path = ('?region=naconf&TYPE=TEXT%3ALIST' '&YEAR={time:%Y}&MONTH={time:%m}&FROM={time:%d%H}&TO={time:%d%H}' '&STNM={stid}').format(time=time, stid=site_id) resp = self.get_path(path) # See if the return is valid, but has no data if resp.text.find('Can\'t') != -1: raise ValueError( 'No data available for {time:%Y-%m-%d %HZ} ' 'for station {stid}.'.format(time=time, stid=site_id)) return resp.text
[ "def", "_get_data_raw", "(", "self", ",", "time", ",", "site_id", ")", ":", "path", "=", "(", "'?region=naconf&TYPE=TEXT%3ALIST'", "'&YEAR={time:%Y}&MONTH={time:%m}&FROM={time:%d%H}&TO={time:%d%H}'", "'&STNM={stid}'", ")", ".", "format", "(", "time", "=", "time", ",", "stid", "=", "site_id", ")", "resp", "=", "self", ".", "get_path", "(", "path", ")", "# See if the return is valid, but has no data", "if", "resp", ".", "text", ".", "find", "(", "'Can\\'t'", ")", "!=", "-", "1", ":", "raise", "ValueError", "(", "'No data available for {time:%Y-%m-%d %HZ} '", "'for station {stid}.'", ".", "format", "(", "time", "=", "time", ",", "stid", "=", "site_id", ")", ")", "return", "resp", ".", "text" ]
Download data from the University of Wyoming's upper air archive. Parameters ---------- time : datetime Date and time for which data should be downloaded site_id : str Site id for which data should be downloaded Returns ------- text of the server response
[ "Download", "data", "from", "the", "University", "of", "Wyoming", "s", "upper", "air", "archive", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/simplewebservice/wyoming.py#L121-L147
Unidata/siphon
siphon/cdmr/ncstream.py
read_ncstream_data
def read_ncstream_data(fobj): """Handle reading an NcStream v1 data block from a file-like object.""" data = read_proto_object(fobj, stream.Data) if data.dataType in (stream.STRING, stream.OPAQUE) or data.vdata: log.debug('Reading string/opaque/vlen') num_obj = read_var_int(fobj) log.debug('Num objects: %d', num_obj) blocks = [read_block(fobj) for _ in range(num_obj)] if data.dataType == stream.STRING: blocks = [b.decode('utf-8', errors='ignore') for b in blocks] # Again endian isn't coded properly dt = data_type_to_numpy(data.dataType).newbyteorder('>') if data.vdata: return np.array([np.frombuffer(b, dtype=dt) for b in blocks]) else: return np.array(blocks, dtype=dt) elif data.dataType in _dtypeLookup: log.debug('Reading array data') bin_data = read_block(fobj) log.debug('Binary data: %s', bin_data) # Hard code to big endian for now since it's not encoded correctly dt = data_type_to_numpy(data.dataType).newbyteorder('>') # Handle decompressing the bytes if data.compress == stream.DEFLATE: bin_data = zlib.decompress(bin_data) assert len(bin_data) == data.uncompressedSize elif data.compress != stream.NONE: raise NotImplementedError('Compression type {0} not implemented!'.format( data.compress)) # Turn bytes into an array return reshape_array(data, np.frombuffer(bin_data, dtype=dt)) elif data.dataType == stream.STRUCTURE: sd = read_proto_object(fobj, stream.StructureData) # Make a datatype appropriate to the rows of struct endian = '>' if data.bigend else '<' dt = np.dtype([(endian, np.void, sd.rowLength)]) # Turn bytes into an array return reshape_array(data, np.frombuffer(sd.data, dtype=dt)) elif data.dataType == stream.SEQUENCE: log.debug('Reading sequence') blocks = [] magic = read_magic(fobj) while magic != MAGIC_VEND: if magic == MAGIC_VDATA: log.error('Bad magic for struct/seq data!') blocks.append(read_proto_object(fobj, stream.StructureData)) magic = read_magic(fobj) return data, blocks else: raise NotImplementedError("Don't know how to handle data type: 
{0}".format( data.dataType))
python
def read_ncstream_data(fobj): """Handle reading an NcStream v1 data block from a file-like object.""" data = read_proto_object(fobj, stream.Data) if data.dataType in (stream.STRING, stream.OPAQUE) or data.vdata: log.debug('Reading string/opaque/vlen') num_obj = read_var_int(fobj) log.debug('Num objects: %d', num_obj) blocks = [read_block(fobj) for _ in range(num_obj)] if data.dataType == stream.STRING: blocks = [b.decode('utf-8', errors='ignore') for b in blocks] # Again endian isn't coded properly dt = data_type_to_numpy(data.dataType).newbyteorder('>') if data.vdata: return np.array([np.frombuffer(b, dtype=dt) for b in blocks]) else: return np.array(blocks, dtype=dt) elif data.dataType in _dtypeLookup: log.debug('Reading array data') bin_data = read_block(fobj) log.debug('Binary data: %s', bin_data) # Hard code to big endian for now since it's not encoded correctly dt = data_type_to_numpy(data.dataType).newbyteorder('>') # Handle decompressing the bytes if data.compress == stream.DEFLATE: bin_data = zlib.decompress(bin_data) assert len(bin_data) == data.uncompressedSize elif data.compress != stream.NONE: raise NotImplementedError('Compression type {0} not implemented!'.format( data.compress)) # Turn bytes into an array return reshape_array(data, np.frombuffer(bin_data, dtype=dt)) elif data.dataType == stream.STRUCTURE: sd = read_proto_object(fobj, stream.StructureData) # Make a datatype appropriate to the rows of struct endian = '>' if data.bigend else '<' dt = np.dtype([(endian, np.void, sd.rowLength)]) # Turn bytes into an array return reshape_array(data, np.frombuffer(sd.data, dtype=dt)) elif data.dataType == stream.SEQUENCE: log.debug('Reading sequence') blocks = [] magic = read_magic(fobj) while magic != MAGIC_VEND: if magic == MAGIC_VDATA: log.error('Bad magic for struct/seq data!') blocks.append(read_proto_object(fobj, stream.StructureData)) magic = read_magic(fobj) return data, blocks else: raise NotImplementedError("Don't know how to handle data type: 
{0}".format( data.dataType))
[ "def", "read_ncstream_data", "(", "fobj", ")", ":", "data", "=", "read_proto_object", "(", "fobj", ",", "stream", ".", "Data", ")", "if", "data", ".", "dataType", "in", "(", "stream", ".", "STRING", ",", "stream", ".", "OPAQUE", ")", "or", "data", ".", "vdata", ":", "log", ".", "debug", "(", "'Reading string/opaque/vlen'", ")", "num_obj", "=", "read_var_int", "(", "fobj", ")", "log", ".", "debug", "(", "'Num objects: %d'", ",", "num_obj", ")", "blocks", "=", "[", "read_block", "(", "fobj", ")", "for", "_", "in", "range", "(", "num_obj", ")", "]", "if", "data", ".", "dataType", "==", "stream", ".", "STRING", ":", "blocks", "=", "[", "b", ".", "decode", "(", "'utf-8'", ",", "errors", "=", "'ignore'", ")", "for", "b", "in", "blocks", "]", "# Again endian isn't coded properly", "dt", "=", "data_type_to_numpy", "(", "data", ".", "dataType", ")", ".", "newbyteorder", "(", "'>'", ")", "if", "data", ".", "vdata", ":", "return", "np", ".", "array", "(", "[", "np", ".", "frombuffer", "(", "b", ",", "dtype", "=", "dt", ")", "for", "b", "in", "blocks", "]", ")", "else", ":", "return", "np", ".", "array", "(", "blocks", ",", "dtype", "=", "dt", ")", "elif", "data", ".", "dataType", "in", "_dtypeLookup", ":", "log", ".", "debug", "(", "'Reading array data'", ")", "bin_data", "=", "read_block", "(", "fobj", ")", "log", ".", "debug", "(", "'Binary data: %s'", ",", "bin_data", ")", "# Hard code to big endian for now since it's not encoded correctly", "dt", "=", "data_type_to_numpy", "(", "data", ".", "dataType", ")", ".", "newbyteorder", "(", "'>'", ")", "# Handle decompressing the bytes", "if", "data", ".", "compress", "==", "stream", ".", "DEFLATE", ":", "bin_data", "=", "zlib", ".", "decompress", "(", "bin_data", ")", "assert", "len", "(", "bin_data", ")", "==", "data", ".", "uncompressedSize", "elif", "data", ".", "compress", "!=", "stream", ".", "NONE", ":", "raise", "NotImplementedError", "(", "'Compression type {0} not implemented!'", ".", "format", "(", "data", ".", 
"compress", ")", ")", "# Turn bytes into an array", "return", "reshape_array", "(", "data", ",", "np", ".", "frombuffer", "(", "bin_data", ",", "dtype", "=", "dt", ")", ")", "elif", "data", ".", "dataType", "==", "stream", ".", "STRUCTURE", ":", "sd", "=", "read_proto_object", "(", "fobj", ",", "stream", ".", "StructureData", ")", "# Make a datatype appropriate to the rows of struct", "endian", "=", "'>'", "if", "data", ".", "bigend", "else", "'<'", "dt", "=", "np", ".", "dtype", "(", "[", "(", "endian", ",", "np", ".", "void", ",", "sd", ".", "rowLength", ")", "]", ")", "# Turn bytes into an array", "return", "reshape_array", "(", "data", ",", "np", ".", "frombuffer", "(", "sd", ".", "data", ",", "dtype", "=", "dt", ")", ")", "elif", "data", ".", "dataType", "==", "stream", ".", "SEQUENCE", ":", "log", ".", "debug", "(", "'Reading sequence'", ")", "blocks", "=", "[", "]", "magic", "=", "read_magic", "(", "fobj", ")", "while", "magic", "!=", "MAGIC_VEND", ":", "if", "magic", "==", "MAGIC_VDATA", ":", "log", ".", "error", "(", "'Bad magic for struct/seq data!'", ")", "blocks", ".", "append", "(", "read_proto_object", "(", "fobj", ",", "stream", ".", "StructureData", ")", ")", "magic", "=", "read_magic", "(", "fobj", ")", "return", "data", ",", "blocks", "else", ":", "raise", "NotImplementedError", "(", "\"Don't know how to handle data type: {0}\"", ".", "format", "(", "data", ".", "dataType", ")", ")" ]
Handle reading an NcStream v1 data block from a file-like object.
[ "Handle", "reading", "an", "NcStream", "v1", "data", "block", "from", "a", "file", "-", "like", "object", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/ncstream.py#L35-L91
Unidata/siphon
siphon/cdmr/ncstream.py
read_ncstream_err
def read_ncstream_err(fobj): """Handle reading an NcStream error from a file-like object and raise as error.""" err = read_proto_object(fobj, stream.Error) raise RuntimeError(err.message)
python
def read_ncstream_err(fobj): """Handle reading an NcStream error from a file-like object and raise as error.""" err = read_proto_object(fobj, stream.Error) raise RuntimeError(err.message)
[ "def", "read_ncstream_err", "(", "fobj", ")", ":", "err", "=", "read_proto_object", "(", "fobj", ",", "stream", ".", "Error", ")", "raise", "RuntimeError", "(", "err", ".", "message", ")" ]
Handle reading an NcStream error from a file-like object and raise as error.
[ "Handle", "reading", "an", "NcStream", "error", "from", "a", "file", "-", "like", "object", "and", "raise", "as", "error", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/ncstream.py#L100-L103
Unidata/siphon
siphon/cdmr/ncstream.py
read_messages
def read_messages(fobj, magic_table): """Read messages from a file-like object until stream is exhausted.""" messages = [] while True: magic = read_magic(fobj) if not magic: break func = magic_table.get(magic) if func is not None: messages.append(func(fobj)) else: log.error('Unknown magic: ' + str(' '.join('{0:02x}'.format(b) for b in bytearray(magic)))) return messages
python
def read_messages(fobj, magic_table): """Read messages from a file-like object until stream is exhausted.""" messages = [] while True: magic = read_magic(fobj) if not magic: break func = magic_table.get(magic) if func is not None: messages.append(func(fobj)) else: log.error('Unknown magic: ' + str(' '.join('{0:02x}'.format(b) for b in bytearray(magic)))) return messages
[ "def", "read_messages", "(", "fobj", ",", "magic_table", ")", ":", "messages", "=", "[", "]", "while", "True", ":", "magic", "=", "read_magic", "(", "fobj", ")", "if", "not", "magic", ":", "break", "func", "=", "magic_table", ".", "get", "(", "magic", ")", "if", "func", "is", "not", "None", ":", "messages", ".", "append", "(", "func", "(", "fobj", ")", ")", "else", ":", "log", ".", "error", "(", "'Unknown magic: '", "+", "str", "(", "' '", ".", "join", "(", "'{0:02x}'", ".", "format", "(", "b", ")", "for", "b", "in", "bytearray", "(", "magic", ")", ")", ")", ")", "return", "messages" ]
Read messages from a file-like object until stream is exhausted.
[ "Read", "messages", "from", "a", "file", "-", "like", "object", "until", "stream", "is", "exhausted", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/ncstream.py#L134-L150
Unidata/siphon
siphon/cdmr/ncstream.py
read_proto_object
def read_proto_object(fobj, klass): """Read a block of data and parse using the given protobuf object.""" log.debug('%s chunk', klass.__name__) obj = klass() obj.ParseFromString(read_block(fobj)) log.debug('Header: %s', str(obj)) return obj
python
def read_proto_object(fobj, klass): """Read a block of data and parse using the given protobuf object.""" log.debug('%s chunk', klass.__name__) obj = klass() obj.ParseFromString(read_block(fobj)) log.debug('Header: %s', str(obj)) return obj
[ "def", "read_proto_object", "(", "fobj", ",", "klass", ")", ":", "log", ".", "debug", "(", "'%s chunk'", ",", "klass", ".", "__name__", ")", "obj", "=", "klass", "(", ")", "obj", ".", "ParseFromString", "(", "read_block", "(", "fobj", ")", ")", "log", ".", "debug", "(", "'Header: %s'", ",", "str", "(", "obj", ")", ")", "return", "obj" ]
Read a block of data and parse using the given protobuf object.
[ "Read", "a", "block", "of", "data", "and", "parse", "using", "the", "given", "protobuf", "object", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/ncstream.py#L153-L159
Unidata/siphon
siphon/cdmr/ncstream.py
read_block
def read_block(fobj): """Read a block. Reads a block from a file object by first reading the number of bytes to read, which must be encoded as a variable-byte length integer. Parameters ---------- fobj : file-like object The file to read from. Returns ------- bytes block of bytes read """ num = read_var_int(fobj) log.debug('Next block: %d bytes', num) return fobj.read(num)
python
def read_block(fobj): """Read a block. Reads a block from a file object by first reading the number of bytes to read, which must be encoded as a variable-byte length integer. Parameters ---------- fobj : file-like object The file to read from. Returns ------- bytes block of bytes read """ num = read_var_int(fobj) log.debug('Next block: %d bytes', num) return fobj.read(num)
[ "def", "read_block", "(", "fobj", ")", ":", "num", "=", "read_var_int", "(", "fobj", ")", "log", ".", "debug", "(", "'Next block: %d bytes'", ",", "num", ")", "return", "fobj", ".", "read", "(", "num", ")" ]
Read a block. Reads a block from a file object by first reading the number of bytes to read, which must be encoded as a variable-byte length integer. Parameters ---------- fobj : file-like object The file to read from. Returns ------- bytes block of bytes read
[ "Read", "a", "block", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/ncstream.py#L179-L198
Unidata/siphon
siphon/cdmr/ncstream.py
process_vlen
def process_vlen(data_header, array): """Process vlen coming back from NCStream v2. This takes the array of values and slices into an object array, with entries containing the appropriate pieces of the original array. Sizes are controlled by the passed in `data_header`. Parameters ---------- data_header : Header array : :class:`numpy.ndarray` Returns ------- ndarray object array containing sub-sequences from the original primitive array """ source = iter(array) return np.array([np.fromiter(itertools.islice(source, size), dtype=array.dtype) for size in data_header.vlens])
python
def process_vlen(data_header, array): """Process vlen coming back from NCStream v2. This takes the array of values and slices into an object array, with entries containing the appropriate pieces of the original array. Sizes are controlled by the passed in `data_header`. Parameters ---------- data_header : Header array : :class:`numpy.ndarray` Returns ------- ndarray object array containing sub-sequences from the original primitive array """ source = iter(array) return np.array([np.fromiter(itertools.islice(source, size), dtype=array.dtype) for size in data_header.vlens])
[ "def", "process_vlen", "(", "data_header", ",", "array", ")", ":", "source", "=", "iter", "(", "array", ")", "return", "np", ".", "array", "(", "[", "np", ".", "fromiter", "(", "itertools", ".", "islice", "(", "source", ",", "size", ")", ",", "dtype", "=", "array", ".", "dtype", ")", "for", "size", "in", "data_header", ".", "vlens", "]", ")" ]
Process vlen coming back from NCStream v2. This takes the array of values and slices into an object array, with entries containing the appropriate pieces of the original array. Sizes are controlled by the passed in `data_header`. Parameters ---------- data_header : Header array : :class:`numpy.ndarray` Returns ------- ndarray object array containing sub-sequences from the original primitive array
[ "Process", "vlen", "coming", "back", "from", "NCStream", "v2", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/ncstream.py#L201-L221
Unidata/siphon
siphon/cdmr/ncstream.py
datacol_to_array
def datacol_to_array(datacol): """Convert DataCol from NCStream v2 into an array with appropriate type. Depending on the data type specified, this extracts data from the appropriate members and packs into a :class:`numpy.ndarray`, recursing as necessary for compound data types. Parameters ---------- datacol : DataCol Returns ------- ndarray array containing extracted data """ if datacol.dataType == stream.STRING: arr = np.array(datacol.stringdata, dtype=np.object) elif datacol.dataType == stream.OPAQUE: arr = np.array(datacol.opaquedata, dtype=np.object) elif datacol.dataType == stream.STRUCTURE: members = OrderedDict((mem.name, datacol_to_array(mem)) for mem in datacol.structdata.memberData) log.debug('Struct members:\n%s', str(members)) # str() around name necessary because protobuf gives unicode names, but dtype doesn't # support them on Python 2 dt = np.dtype([(str(name), arr.dtype) for name, arr in members.items()]) log.debug('Struct dtype: %s', str(dt)) arr = np.empty((datacol.nelems,), dtype=dt) for name, arr_data in members.items(): arr[name] = arr_data else: # Make an appropriate datatype endian = '>' if datacol.bigend else '<' dt = data_type_to_numpy(datacol.dataType).newbyteorder(endian) # Turn bytes into an array arr = np.frombuffer(datacol.primdata, dtype=dt) if arr.size != datacol.nelems: log.warning('Array size %d does not agree with nelems %d', arr.size, datacol.nelems) if datacol.isVlen: arr = process_vlen(datacol, arr) if arr.dtype == np.object_: arr = reshape_array(datacol, arr) else: # In this case, the array collapsed, need different resize that # correctly sizes from elements shape = tuple(r.size for r in datacol.section.range) + (datacol.vlens[0],) arr = arr.reshape(*shape) else: arr = reshape_array(datacol, arr) return arr
python
def datacol_to_array(datacol): """Convert DataCol from NCStream v2 into an array with appropriate type. Depending on the data type specified, this extracts data from the appropriate members and packs into a :class:`numpy.ndarray`, recursing as necessary for compound data types. Parameters ---------- datacol : DataCol Returns ------- ndarray array containing extracted data """ if datacol.dataType == stream.STRING: arr = np.array(datacol.stringdata, dtype=np.object) elif datacol.dataType == stream.OPAQUE: arr = np.array(datacol.opaquedata, dtype=np.object) elif datacol.dataType == stream.STRUCTURE: members = OrderedDict((mem.name, datacol_to_array(mem)) for mem in datacol.structdata.memberData) log.debug('Struct members:\n%s', str(members)) # str() around name necessary because protobuf gives unicode names, but dtype doesn't # support them on Python 2 dt = np.dtype([(str(name), arr.dtype) for name, arr in members.items()]) log.debug('Struct dtype: %s', str(dt)) arr = np.empty((datacol.nelems,), dtype=dt) for name, arr_data in members.items(): arr[name] = arr_data else: # Make an appropriate datatype endian = '>' if datacol.bigend else '<' dt = data_type_to_numpy(datacol.dataType).newbyteorder(endian) # Turn bytes into an array arr = np.frombuffer(datacol.primdata, dtype=dt) if arr.size != datacol.nelems: log.warning('Array size %d does not agree with nelems %d', arr.size, datacol.nelems) if datacol.isVlen: arr = process_vlen(datacol, arr) if arr.dtype == np.object_: arr = reshape_array(datacol, arr) else: # In this case, the array collapsed, need different resize that # correctly sizes from elements shape = tuple(r.size for r in datacol.section.range) + (datacol.vlens[0],) arr = arr.reshape(*shape) else: arr = reshape_array(datacol, arr) return arr
[ "def", "datacol_to_array", "(", "datacol", ")", ":", "if", "datacol", ".", "dataType", "==", "stream", ".", "STRING", ":", "arr", "=", "np", ".", "array", "(", "datacol", ".", "stringdata", ",", "dtype", "=", "np", ".", "object", ")", "elif", "datacol", ".", "dataType", "==", "stream", ".", "OPAQUE", ":", "arr", "=", "np", ".", "array", "(", "datacol", ".", "opaquedata", ",", "dtype", "=", "np", ".", "object", ")", "elif", "datacol", ".", "dataType", "==", "stream", ".", "STRUCTURE", ":", "members", "=", "OrderedDict", "(", "(", "mem", ".", "name", ",", "datacol_to_array", "(", "mem", ")", ")", "for", "mem", "in", "datacol", ".", "structdata", ".", "memberData", ")", "log", ".", "debug", "(", "'Struct members:\\n%s'", ",", "str", "(", "members", ")", ")", "# str() around name necessary because protobuf gives unicode names, but dtype doesn't", "# support them on Python 2", "dt", "=", "np", ".", "dtype", "(", "[", "(", "str", "(", "name", ")", ",", "arr", ".", "dtype", ")", "for", "name", ",", "arr", "in", "members", ".", "items", "(", ")", "]", ")", "log", ".", "debug", "(", "'Struct dtype: %s'", ",", "str", "(", "dt", ")", ")", "arr", "=", "np", ".", "empty", "(", "(", "datacol", ".", "nelems", ",", ")", ",", "dtype", "=", "dt", ")", "for", "name", ",", "arr_data", "in", "members", ".", "items", "(", ")", ":", "arr", "[", "name", "]", "=", "arr_data", "else", ":", "# Make an appropriate datatype", "endian", "=", "'>'", "if", "datacol", ".", "bigend", "else", "'<'", "dt", "=", "data_type_to_numpy", "(", "datacol", ".", "dataType", ")", ".", "newbyteorder", "(", "endian", ")", "# Turn bytes into an array", "arr", "=", "np", ".", "frombuffer", "(", "datacol", ".", "primdata", ",", "dtype", "=", "dt", ")", "if", "arr", ".", "size", "!=", "datacol", ".", "nelems", ":", "log", ".", "warning", "(", "'Array size %d does not agree with nelems %d'", ",", "arr", ".", "size", ",", "datacol", ".", "nelems", ")", "if", "datacol", ".", "isVlen", ":", "arr", "=", "process_vlen", 
"(", "datacol", ",", "arr", ")", "if", "arr", ".", "dtype", "==", "np", ".", "object_", ":", "arr", "=", "reshape_array", "(", "datacol", ",", "arr", ")", "else", ":", "# In this case, the array collapsed, need different resize that", "# correctly sizes from elements", "shape", "=", "tuple", "(", "r", ".", "size", "for", "r", "in", "datacol", ".", "section", ".", "range", ")", "+", "(", "datacol", ".", "vlens", "[", "0", "]", ",", ")", "arr", "=", "arr", ".", "reshape", "(", "*", "shape", ")", "else", ":", "arr", "=", "reshape_array", "(", "datacol", ",", "arr", ")", "return", "arr" ]
Convert DataCol from NCStream v2 into an array with appropriate type. Depending on the data type specified, this extracts data from the appropriate members and packs into a :class:`numpy.ndarray`, recursing as necessary for compound data types. Parameters ---------- datacol : DataCol Returns ------- ndarray array containing extracted data
[ "Convert", "DataCol", "from", "NCStream", "v2", "into", "an", "array", "with", "appropriate", "type", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/ncstream.py#L224-L278
Unidata/siphon
siphon/cdmr/ncstream.py
reshape_array
def reshape_array(data_header, array): """Extract the appropriate array shape from the header. Can handle taking a data header and either bytes containing data or a StructureData instance, which will have binary data as well as some additional information. Parameters ---------- array : :class:`numpy.ndarray` data_header : Data """ shape = tuple(r.size for r in data_header.section.range) if shape: return array.reshape(*shape) else: return array
python
def reshape_array(data_header, array): """Extract the appropriate array shape from the header. Can handle taking a data header and either bytes containing data or a StructureData instance, which will have binary data as well as some additional information. Parameters ---------- array : :class:`numpy.ndarray` data_header : Data """ shape = tuple(r.size for r in data_header.section.range) if shape: return array.reshape(*shape) else: return array
[ "def", "reshape_array", "(", "data_header", ",", "array", ")", ":", "shape", "=", "tuple", "(", "r", ".", "size", "for", "r", "in", "data_header", ".", "section", ".", "range", ")", "if", "shape", ":", "return", "array", ".", "reshape", "(", "*", "shape", ")", "else", ":", "return", "array" ]
Extract the appropriate array shape from the header. Can handle taking a data header and either bytes containing data or a StructureData instance, which will have binary data as well as some additional information. Parameters ---------- array : :class:`numpy.ndarray` data_header : Data
[ "Extract", "the", "appropriate", "array", "shape", "from", "the", "header", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/ncstream.py#L281-L297
Unidata/siphon
siphon/cdmr/ncstream.py
data_type_to_numpy
def data_type_to_numpy(datatype, unsigned=False): """Convert an ncstream datatype to a numpy one.""" basic_type = _dtypeLookup[datatype] if datatype in (stream.STRING, stream.OPAQUE): return np.dtype(basic_type) if unsigned: basic_type = basic_type.replace('i', 'u') return np.dtype('=' + basic_type)
python
def data_type_to_numpy(datatype, unsigned=False): """Convert an ncstream datatype to a numpy one.""" basic_type = _dtypeLookup[datatype] if datatype in (stream.STRING, stream.OPAQUE): return np.dtype(basic_type) if unsigned: basic_type = basic_type.replace('i', 'u') return np.dtype('=' + basic_type)
[ "def", "data_type_to_numpy", "(", "datatype", ",", "unsigned", "=", "False", ")", ":", "basic_type", "=", "_dtypeLookup", "[", "datatype", "]", "if", "datatype", "in", "(", "stream", ".", "STRING", ",", "stream", ".", "OPAQUE", ")", ":", "return", "np", ".", "dtype", "(", "basic_type", ")", "if", "unsigned", ":", "basic_type", "=", "basic_type", ".", "replace", "(", "'i'", ",", "'u'", ")", "return", "np", ".", "dtype", "(", "'='", "+", "basic_type", ")" ]
Convert an ncstream datatype to a numpy one.
[ "Convert", "an", "ncstream", "datatype", "to", "a", "numpy", "one", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/ncstream.py#L310-L319
Unidata/siphon
siphon/cdmr/ncstream.py
struct_to_dtype
def struct_to_dtype(struct): """Convert a Structure specification to a numpy structured dtype.""" # str() around name necessary because protobuf gives unicode names, but dtype doesn't # support them on Python 2 fields = [(str(var.name), data_type_to_numpy(var.dataType, var.unsigned)) for var in struct.vars] for s in struct.structs: fields.append((str(s.name), struct_to_dtype(s))) log.debug('Structure fields: %s', fields) dt = np.dtype(fields) return dt
python
def struct_to_dtype(struct): """Convert a Structure specification to a numpy structured dtype.""" # str() around name necessary because protobuf gives unicode names, but dtype doesn't # support them on Python 2 fields = [(str(var.name), data_type_to_numpy(var.dataType, var.unsigned)) for var in struct.vars] for s in struct.structs: fields.append((str(s.name), struct_to_dtype(s))) log.debug('Structure fields: %s', fields) dt = np.dtype(fields) return dt
[ "def", "struct_to_dtype", "(", "struct", ")", ":", "# str() around name necessary because protobuf gives unicode names, but dtype doesn't", "# support them on Python 2", "fields", "=", "[", "(", "str", "(", "var", ".", "name", ")", ",", "data_type_to_numpy", "(", "var", ".", "dataType", ",", "var", ".", "unsigned", ")", ")", "for", "var", "in", "struct", ".", "vars", "]", "for", "s", "in", "struct", ".", "structs", ":", "fields", ".", "append", "(", "(", "str", "(", "s", ".", "name", ")", ",", "struct_to_dtype", "(", "s", ")", ")", ")", "log", ".", "debug", "(", "'Structure fields: %s'", ",", "fields", ")", "dt", "=", "np", ".", "dtype", "(", "fields", ")", "return", "dt" ]
Convert a Structure specification to a numpy structured dtype.
[ "Convert", "a", "Structure", "specification", "to", "a", "numpy", "structured", "dtype", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/ncstream.py#L322-L333
Unidata/siphon
siphon/cdmr/ncstream.py
unpack_variable
def unpack_variable(var): """Unpack an NCStream Variable into information we can use.""" # If we actually get a structure instance, handle turning that into a variable if var.dataType == stream.STRUCTURE: return None, struct_to_dtype(var), 'Structure' elif var.dataType == stream.SEQUENCE: log.warning('Sequence support not implemented!') dt = data_type_to_numpy(var.dataType, var.unsigned) if var.dataType == stream.OPAQUE: type_name = 'opaque' elif var.dataType == stream.STRING: type_name = 'string' else: type_name = dt.name if var.data: log.debug('Storing variable data: %s %s', dt, var.data) if var.dataType == stream.STRING: data = var.data else: # Always sent big endian data = np.frombuffer(var.data, dtype=dt.newbyteorder('>')) else: data = None return data, dt, type_name
python
def unpack_variable(var): """Unpack an NCStream Variable into information we can use.""" # If we actually get a structure instance, handle turning that into a variable if var.dataType == stream.STRUCTURE: return None, struct_to_dtype(var), 'Structure' elif var.dataType == stream.SEQUENCE: log.warning('Sequence support not implemented!') dt = data_type_to_numpy(var.dataType, var.unsigned) if var.dataType == stream.OPAQUE: type_name = 'opaque' elif var.dataType == stream.STRING: type_name = 'string' else: type_name = dt.name if var.data: log.debug('Storing variable data: %s %s', dt, var.data) if var.dataType == stream.STRING: data = var.data else: # Always sent big endian data = np.frombuffer(var.data, dtype=dt.newbyteorder('>')) else: data = None return data, dt, type_name
[ "def", "unpack_variable", "(", "var", ")", ":", "# If we actually get a structure instance, handle turning that into a variable", "if", "var", ".", "dataType", "==", "stream", ".", "STRUCTURE", ":", "return", "None", ",", "struct_to_dtype", "(", "var", ")", ",", "'Structure'", "elif", "var", ".", "dataType", "==", "stream", ".", "SEQUENCE", ":", "log", ".", "warning", "(", "'Sequence support not implemented!'", ")", "dt", "=", "data_type_to_numpy", "(", "var", ".", "dataType", ",", "var", ".", "unsigned", ")", "if", "var", ".", "dataType", "==", "stream", ".", "OPAQUE", ":", "type_name", "=", "'opaque'", "elif", "var", ".", "dataType", "==", "stream", ".", "STRING", ":", "type_name", "=", "'string'", "else", ":", "type_name", "=", "dt", ".", "name", "if", "var", ".", "data", ":", "log", ".", "debug", "(", "'Storing variable data: %s %s'", ",", "dt", ",", "var", ".", "data", ")", "if", "var", ".", "dataType", "==", "stream", ".", "STRING", ":", "data", "=", "var", ".", "data", "else", ":", "# Always sent big endian", "data", "=", "np", ".", "frombuffer", "(", "var", ".", "data", ",", "dtype", "=", "dt", ".", "newbyteorder", "(", "'>'", ")", ")", "else", ":", "data", "=", "None", "return", "data", ",", "dt", ",", "type_name" ]
Unpack an NCStream Variable into information we can use.
[ "Unpack", "an", "NCStream", "Variable", "into", "information", "we", "can", "use", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/ncstream.py#L336-L362
Unidata/siphon
siphon/cdmr/ncstream.py
unpack_attribute
def unpack_attribute(att): """Unpack an embedded attribute into a python or numpy object.""" if att.unsigned: log.warning('Unsupported unsigned attribute!') # TDS 5.0 now has a dataType attribute that takes precedence if att.len == 0: # Empty val = None elif att.dataType == stream.STRING: # Then look for new datatype string val = att.sdata elif att.dataType: # Then a non-zero new data type val = np.frombuffer(att.data, dtype='>' + _dtypeLookup[att.dataType], count=att.len) elif att.type: # Then non-zero old-data type0 val = np.frombuffer(att.data, dtype=_attrConverters[att.type], count=att.len) elif att.sdata: # This leaves both 0, try old string val = att.sdata else: # Assume new datatype is Char (0) val = np.array(att.data, dtype=_dtypeLookup[att.dataType]) if att.len == 1: val = val[0] return att.name, val
python
def unpack_attribute(att): """Unpack an embedded attribute into a python or numpy object.""" if att.unsigned: log.warning('Unsupported unsigned attribute!') # TDS 5.0 now has a dataType attribute that takes precedence if att.len == 0: # Empty val = None elif att.dataType == stream.STRING: # Then look for new datatype string val = att.sdata elif att.dataType: # Then a non-zero new data type val = np.frombuffer(att.data, dtype='>' + _dtypeLookup[att.dataType], count=att.len) elif att.type: # Then non-zero old-data type0 val = np.frombuffer(att.data, dtype=_attrConverters[att.type], count=att.len) elif att.sdata: # This leaves both 0, try old string val = att.sdata else: # Assume new datatype is Char (0) val = np.array(att.data, dtype=_dtypeLookup[att.dataType]) if att.len == 1: val = val[0] return att.name, val
[ "def", "unpack_attribute", "(", "att", ")", ":", "if", "att", ".", "unsigned", ":", "log", ".", "warning", "(", "'Unsupported unsigned attribute!'", ")", "# TDS 5.0 now has a dataType attribute that takes precedence", "if", "att", ".", "len", "==", "0", ":", "# Empty", "val", "=", "None", "elif", "att", ".", "dataType", "==", "stream", ".", "STRING", ":", "# Then look for new datatype string", "val", "=", "att", ".", "sdata", "elif", "att", ".", "dataType", ":", "# Then a non-zero new data type", "val", "=", "np", ".", "frombuffer", "(", "att", ".", "data", ",", "dtype", "=", "'>'", "+", "_dtypeLookup", "[", "att", ".", "dataType", "]", ",", "count", "=", "att", ".", "len", ")", "elif", "att", ".", "type", ":", "# Then non-zero old-data type0", "val", "=", "np", ".", "frombuffer", "(", "att", ".", "data", ",", "dtype", "=", "_attrConverters", "[", "att", ".", "type", "]", ",", "count", "=", "att", ".", "len", ")", "elif", "att", ".", "sdata", ":", "# This leaves both 0, try old string", "val", "=", "att", ".", "sdata", "else", ":", "# Assume new datatype is Char (0)", "val", "=", "np", ".", "array", "(", "att", ".", "data", ",", "dtype", "=", "_dtypeLookup", "[", "att", ".", "dataType", "]", ")", "if", "att", ".", "len", "==", "1", ":", "val", "=", "val", "[", "0", "]", "return", "att", ".", "name", ",", "val" ]
Unpack an embedded attribute into a python or numpy object.
[ "Unpack", "an", "embedded", "attribute", "into", "a", "python", "or", "numpy", "object", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/ncstream.py#L373-L397
Unidata/siphon
siphon/cdmr/ncstream.py
read_var_int
def read_var_int(file_obj): """Read a variable-length integer. Parameters ---------- file_obj : file-like object The file to read from. Returns ------- int the variable-length value read """ # Read all bytes from here, stopping with the first one that does not have # the MSB set. Save the lower 7 bits, and keep stacking to the *left*. val = 0 shift = 0 while True: # Read next byte next_val = ord(file_obj.read(1)) val |= ((next_val & 0x7F) << shift) shift += 7 if not next_val & 0x80: break return val
python
def read_var_int(file_obj): """Read a variable-length integer. Parameters ---------- file_obj : file-like object The file to read from. Returns ------- int the variable-length value read """ # Read all bytes from here, stopping with the first one that does not have # the MSB set. Save the lower 7 bits, and keep stacking to the *left*. val = 0 shift = 0 while True: # Read next byte next_val = ord(file_obj.read(1)) val |= ((next_val & 0x7F) << shift) shift += 7 if not next_val & 0x80: break return val
[ "def", "read_var_int", "(", "file_obj", ")", ":", "# Read all bytes from here, stopping with the first one that does not have", "# the MSB set. Save the lower 7 bits, and keep stacking to the *left*.", "val", "=", "0", "shift", "=", "0", "while", "True", ":", "# Read next byte", "next_val", "=", "ord", "(", "file_obj", ".", "read", "(", "1", ")", ")", "val", "|=", "(", "(", "next_val", "&", "0x7F", ")", "<<", "shift", ")", "shift", "+=", "7", "if", "not", "next_val", "&", "0x80", ":", "break", "return", "val" ]
Read a variable-length integer. Parameters ---------- file_obj : file-like object The file to read from. Returns ------- int the variable-length value read
[ "Read", "a", "variable", "-", "length", "integer", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/ncstream.py#L400-L426
Unidata/siphon
siphon/cdmr/cdmremote.py
CDMRemote.fetch_data
def fetch_data(self, **var): """Retrieve data from CDMRemote for one or more variables.""" varstr = ','.join(name + self._convert_indices(ind) for name, ind in var.items()) query = self.query().add_query_parameter(req='data', var=varstr) return self._fetch(query)
python
def fetch_data(self, **var): """Retrieve data from CDMRemote for one or more variables.""" varstr = ','.join(name + self._convert_indices(ind) for name, ind in var.items()) query = self.query().add_query_parameter(req='data', var=varstr) return self._fetch(query)
[ "def", "fetch_data", "(", "self", ",", "*", "*", "var", ")", ":", "varstr", "=", "','", ".", "join", "(", "name", "+", "self", ".", "_convert_indices", "(", "ind", ")", "for", "name", ",", "ind", "in", "var", ".", "items", "(", ")", ")", "query", "=", "self", ".", "query", "(", ")", ".", "add_query_parameter", "(", "req", "=", "'data'", ",", "var", "=", "varstr", ")", "return", "self", ".", "_fetch", "(", "query", ")" ]
Retrieve data from CDMRemote for one or more variables.
[ "Retrieve", "data", "from", "CDMRemote", "for", "one", "or", "more", "variables", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/cdmremote.py#L31-L36
Unidata/siphon
siphon/cdmr/cdmremote.py
CDMRemote.query
def query(self): """Generate a new query for CDMRemote. This handles turning on compression if necessary. Returns ------- HTTPQuery The created query. """ q = super(CDMRemote, self).query() # Turn on compression if it's been set on the object if self.deflate: q.add_query_parameter(deflate=self.deflate) return q
python
def query(self): """Generate a new query for CDMRemote. This handles turning on compression if necessary. Returns ------- HTTPQuery The created query. """ q = super(CDMRemote, self).query() # Turn on compression if it's been set on the object if self.deflate: q.add_query_parameter(deflate=self.deflate) return q
[ "def", "query", "(", "self", ")", ":", "q", "=", "super", "(", "CDMRemote", ",", "self", ")", ".", "query", "(", ")", "# Turn on compression if it's been set on the object", "if", "self", ".", "deflate", ":", "q", ".", "add_query_parameter", "(", "deflate", "=", "self", ".", "deflate", ")", "return", "q" ]
Generate a new query for CDMRemote. This handles turning on compression if necessary. Returns ------- HTTPQuery The created query.
[ "Generate", "a", "new", "query", "for", "CDMRemote", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/cdmremote.py#L46-L63
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
check_token
def check_token(func): """检查 access token 是否有效.""" @wraps(func) def wrapper(*args, **kwargs): response = func(*args, **kwargs) if response.status_code == 401: raise InvalidToken('Access token invalid or no longer valid') else: return response return wrapper
python
def check_token(func): """检查 access token 是否有效.""" @wraps(func) def wrapper(*args, **kwargs): response = func(*args, **kwargs) if response.status_code == 401: raise InvalidToken('Access token invalid or no longer valid') else: return response return wrapper
[ "def", "check_token", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "response", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "response", ".", "status_code", "==", "401", ":", "raise", "InvalidToken", "(", "'Access token invalid or no longer valid'", ")", "else", ":", "return", "response", "return", "wrapper" ]
检查 access token 是否有效.
[ "检查", "access", "token", "是否有效", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L22-L31
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.upload
def upload(self, remote_path, file_content, ondup=None, **kwargs): """上传单个文件(<2G). | 百度PCS服务目前支持最大2G的单个文件上传。 | 如需支持超大文件(>2G)的断点续传,请参考下面的“分片文件上传”方法。 :param remote_path: 网盘中文件的保存路径(包含文件名)。 必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param file_content: 上传文件的内容/文件对象 。 (e.g. ``open('foobar', 'rb')`` ) :param ondup: (可选) * 'overwrite':表示覆盖同名文件; * 'newcopy':表示生成文件副本并进行重命名,命名规则为“ 文件名_日期.后缀”。 :return: Response 对象 """ params = { 'path': remote_path, 'ondup': ondup } files = {'file': ('file', file_content, '')} url = 'https://c.pcs.baidu.com/rest/2.0/pcs/file' return self._request('file', 'upload', url=url, extra_params=params, files=files, **kwargs)
python
def upload(self, remote_path, file_content, ondup=None, **kwargs): """上传单个文件(<2G). | 百度PCS服务目前支持最大2G的单个文件上传。 | 如需支持超大文件(>2G)的断点续传,请参考下面的“分片文件上传”方法。 :param remote_path: 网盘中文件的保存路径(包含文件名)。 必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param file_content: 上传文件的内容/文件对象 。 (e.g. ``open('foobar', 'rb')`` ) :param ondup: (可选) * 'overwrite':表示覆盖同名文件; * 'newcopy':表示生成文件副本并进行重命名,命名规则为“ 文件名_日期.后缀”。 :return: Response 对象 """ params = { 'path': remote_path, 'ondup': ondup } files = {'file': ('file', file_content, '')} url = 'https://c.pcs.baidu.com/rest/2.0/pcs/file' return self._request('file', 'upload', url=url, extra_params=params, files=files, **kwargs)
[ "def", "upload", "(", "self", ",", "remote_path", ",", "file_content", ",", "ondup", "=", "None", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'path'", ":", "remote_path", ",", "'ondup'", ":", "ondup", "}", "files", "=", "{", "'file'", ":", "(", "'file'", ",", "file_content", ",", "''", ")", "}", "url", "=", "'https://c.pcs.baidu.com/rest/2.0/pcs/file'", "return", "self", ".", "_request", "(", "'file'", ",", "'upload'", ",", "url", "=", "url", ",", "extra_params", "=", "params", ",", "files", "=", "files", ",", "*", "*", "kwargs", ")" ]
上传单个文件(<2G). | 百度PCS服务目前支持最大2G的单个文件上传。 | 如需支持超大文件(>2G)的断点续传,请参考下面的“分片文件上传”方法。 :param remote_path: 网盘中文件的保存路径(包含文件名)。 必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param file_content: 上传文件的内容/文件对象 。 (e.g. ``open('foobar', 'rb')`` ) :param ondup: (可选) * 'overwrite':表示覆盖同名文件; * 'newcopy':表示生成文件副本并进行重命名,命名规则为“ 文件名_日期.后缀”。 :return: Response 对象
[ "上传单个文件(<2G)", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L103-L135
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.upload_tmpfile
def upload_tmpfile(self, file_content, **kwargs): """分片上传—文件分片及上传. 百度 PCS 服务支持每次直接上传最大2G的单个文件。 如需支持上传超大文件(>2G),则可以通过组合调用分片文件上传的 ``upload_tmpfile`` 方法和 ``upload_superfile`` 方法实现: 1. 首先,将超大文件分割为2G以内的单文件,并调用 ``upload_tmpfile`` 将分片文件依次上传; 2. 其次,调用 ``upload_superfile`` ,完成分片文件的重组。 除此之外,如果应用中需要支持断点续传的功能, 也可以通过分片上传文件并调用 ``upload_superfile`` 接口的方式实现。 :param file_content: 上传文件的内容/文件对象 (e.g. ``open('foobar', 'rb')`` ) :return: Response 对象 """ params = { 'type': 'tmpfile' } files = {'file': ('file', file_content, '')} url = 'https://c.pcs.baidu.com/rest/2.0/pcs/file' return self._request('file', 'upload', url=url, extra_params=params, files=files, **kwargs)
python
def upload_tmpfile(self, file_content, **kwargs): """分片上传—文件分片及上传. 百度 PCS 服务支持每次直接上传最大2G的单个文件。 如需支持上传超大文件(>2G),则可以通过组合调用分片文件上传的 ``upload_tmpfile`` 方法和 ``upload_superfile`` 方法实现: 1. 首先,将超大文件分割为2G以内的单文件,并调用 ``upload_tmpfile`` 将分片文件依次上传; 2. 其次,调用 ``upload_superfile`` ,完成分片文件的重组。 除此之外,如果应用中需要支持断点续传的功能, 也可以通过分片上传文件并调用 ``upload_superfile`` 接口的方式实现。 :param file_content: 上传文件的内容/文件对象 (e.g. ``open('foobar', 'rb')`` ) :return: Response 对象 """ params = { 'type': 'tmpfile' } files = {'file': ('file', file_content, '')} url = 'https://c.pcs.baidu.com/rest/2.0/pcs/file' return self._request('file', 'upload', url=url, extra_params=params, files=files, **kwargs)
[ "def", "upload_tmpfile", "(", "self", ",", "file_content", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'type'", ":", "'tmpfile'", "}", "files", "=", "{", "'file'", ":", "(", "'file'", ",", "file_content", ",", "''", ")", "}", "url", "=", "'https://c.pcs.baidu.com/rest/2.0/pcs/file'", "return", "self", ".", "_request", "(", "'file'", ",", "'upload'", ",", "url", "=", "url", ",", "extra_params", "=", "params", ",", "files", "=", "files", ",", "*", "*", "kwargs", ")" ]
分片上传—文件分片及上传. 百度 PCS 服务支持每次直接上传最大2G的单个文件。 如需支持上传超大文件(>2G),则可以通过组合调用分片文件上传的 ``upload_tmpfile`` 方法和 ``upload_superfile`` 方法实现: 1. 首先,将超大文件分割为2G以内的单文件,并调用 ``upload_tmpfile`` 将分片文件依次上传; 2. 其次,调用 ``upload_superfile`` ,完成分片文件的重组。 除此之外,如果应用中需要支持断点续传的功能, 也可以通过分片上传文件并调用 ``upload_superfile`` 接口的方式实现。 :param file_content: 上传文件的内容/文件对象 (e.g. ``open('foobar', 'rb')`` ) :return: Response 对象
[ "分片上传—文件分片及上传", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L137-L163
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.upload_superfile
def upload_superfile(self, remote_path, block_list, ondup=None, **kwargs): """分片上传—合并分片文件. 与分片文件上传的 ``upload_tmpfile`` 方法配合使用, 可实现超大文件(>2G)上传,同时也可用于断点续传的场景。 :param remote_path: 网盘中文件的保存路径(包含文件名)。 必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param block_list: 子文件内容的 MD5 值列表;子文件至少两个,最多1024个。 :type block_list: list :param ondup: (可选) * 'overwrite':表示覆盖同名文件; * 'newcopy':表示生成文件副本并进行重命名,命名规则为“ 文件名_日期.后缀”。 :return: Response 对象 """ params = { 'path': remote_path, 'ondup': ondup } data = { 'param': json.dumps({'block_list': block_list}), } return self._request('file', 'createsuperfile', extra_params=params, data=data, **kwargs)
python
def upload_superfile(self, remote_path, block_list, ondup=None, **kwargs): """分片上传—合并分片文件. 与分片文件上传的 ``upload_tmpfile`` 方法配合使用, 可实现超大文件(>2G)上传,同时也可用于断点续传的场景。 :param remote_path: 网盘中文件的保存路径(包含文件名)。 必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param block_list: 子文件内容的 MD5 值列表;子文件至少两个,最多1024个。 :type block_list: list :param ondup: (可选) * 'overwrite':表示覆盖同名文件; * 'newcopy':表示生成文件副本并进行重命名,命名规则为“ 文件名_日期.后缀”。 :return: Response 对象 """ params = { 'path': remote_path, 'ondup': ondup } data = { 'param': json.dumps({'block_list': block_list}), } return self._request('file', 'createsuperfile', extra_params=params, data=data, **kwargs)
[ "def", "upload_superfile", "(", "self", ",", "remote_path", ",", "block_list", ",", "ondup", "=", "None", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'path'", ":", "remote_path", ",", "'ondup'", ":", "ondup", "}", "data", "=", "{", "'param'", ":", "json", ".", "dumps", "(", "{", "'block_list'", ":", "block_list", "}", ")", ",", "}", "return", "self", ".", "_request", "(", "'file'", ",", "'createsuperfile'", ",", "extra_params", "=", "params", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
分片上传—合并分片文件. 与分片文件上传的 ``upload_tmpfile`` 方法配合使用, 可实现超大文件(>2G)上传,同时也可用于断点续传的场景。 :param remote_path: 网盘中文件的保存路径(包含文件名)。 必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param block_list: 子文件内容的 MD5 值列表;子文件至少两个,最多1024个。 :type block_list: list :param ondup: (可选) * 'overwrite':表示覆盖同名文件; * 'newcopy':表示生成文件副本并进行重命名,命名规则为“ 文件名_日期.后缀”。 :return: Response 对象
[ "分片上传—合并分片文件", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L165-L198
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.mkdir
def mkdir(self, remote_path, **kwargs): """为当前用户创建一个目录. :param remote_path: 网盘中目录的路径,必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :return: Response 对象 """ data = { 'path': remote_path } return self._request('file', 'mkdir', data=data, **kwargs)
python
def mkdir(self, remote_path, **kwargs): """为当前用户创建一个目录. :param remote_path: 网盘中目录的路径,必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :return: Response 对象 """ data = { 'path': remote_path } return self._request('file', 'mkdir', data=data, **kwargs)
[ "def", "mkdir", "(", "self", ",", "remote_path", ",", "*", "*", "kwargs", ")", ":", "data", "=", "{", "'path'", ":", "remote_path", "}", "return", "self", ".", "_request", "(", "'file'", ",", "'mkdir'", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
为当前用户创建一个目录. :param remote_path: 网盘中目录的路径,必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :return: Response 对象
[ "为当前用户创建一个目录", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L232-L249
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.meta
def meta(self, remote_path, **kwargs): """获取单个文件或目录的元信息. :param remote_path: 网盘中文件/目录的路径,必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :return: Response 对象 """ params = { 'path': remote_path } return self._request('file', 'meta', extra_params=params, **kwargs)
python
def meta(self, remote_path, **kwargs): """获取单个文件或目录的元信息. :param remote_path: 网盘中文件/目录的路径,必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :return: Response 对象 """ params = { 'path': remote_path } return self._request('file', 'meta', extra_params=params, **kwargs)
[ "def", "meta", "(", "self", ",", "remote_path", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'path'", ":", "remote_path", "}", "return", "self", ".", "_request", "(", "'file'", ",", "'meta'", ",", "extra_params", "=", "params", ",", "*", "*", "kwargs", ")" ]
获取单个文件或目录的元信息. :param remote_path: 网盘中文件/目录的路径,必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :return: Response 对象
[ "获取单个文件或目录的元信息", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L251-L268
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.list_files
def list_files(self, remote_path, by=None, order=None, limit=None, **kwargs): """获取目录下的文件列表. :param remote_path: 网盘中目录的路径,必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param by: 排序字段,缺省根据文件类型排序: * time(修改时间) * name(文件名) * size(大小,注意目录无大小) :param order: “asc”或“desc”,缺省采用降序排序。 * asc(升序) * desc(降序) :param limit: 返回条目控制,参数格式为:n1-n2。 返回结果集的[n1, n2)之间的条目,缺省返回所有条目; n1从0开始。 :return: Response 对象 """ params = { 'path': remote_path, 'by': by, 'order': order, 'limit': limit } return self._request('file', 'list', extra_params=params, **kwargs)
python
def list_files(self, remote_path, by=None, order=None, limit=None, **kwargs): """获取目录下的文件列表. :param remote_path: 网盘中目录的路径,必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param by: 排序字段,缺省根据文件类型排序: * time(修改时间) * name(文件名) * size(大小,注意目录无大小) :param order: “asc”或“desc”,缺省采用降序排序。 * asc(升序) * desc(降序) :param limit: 返回条目控制,参数格式为:n1-n2。 返回结果集的[n1, n2)之间的条目,缺省返回所有条目; n1从0开始。 :return: Response 对象 """ params = { 'path': remote_path, 'by': by, 'order': order, 'limit': limit } return self._request('file', 'list', extra_params=params, **kwargs)
[ "def", "list_files", "(", "self", ",", "remote_path", ",", "by", "=", "None", ",", "order", "=", "None", ",", "limit", "=", "None", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'path'", ":", "remote_path", ",", "'by'", ":", "by", ",", "'order'", ":", "order", ",", "'limit'", ":", "limit", "}", "return", "self", ".", "_request", "(", "'file'", ",", "'list'", ",", "extra_params", "=", "params", ",", "*", "*", "kwargs", ")" ]
获取目录下的文件列表. :param remote_path: 网盘中目录的路径,必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param by: 排序字段,缺省根据文件类型排序: * time(修改时间) * name(文件名) * size(大小,注意目录无大小) :param order: “asc”或“desc”,缺省采用降序排序。 * asc(升序) * desc(降序) :param limit: 返回条目控制,参数格式为:n1-n2。 返回结果集的[n1, n2)之间的条目,缺省返回所有条目; n1从0开始。 :return: Response 对象
[ "获取目录下的文件列表", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L292-L326
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.move
def move(self, from_path, to_path, **kwargs): """移动单个文件或目录. :param from_path: 源文件/目录在网盘中的路径(包括文件名)。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param to_path: 目标文件/目录在网盘中的路径(包括文件名)。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :return: Response 对象 """ data = { 'from': from_path, 'to': to_path, } return self._request('file', 'move', data=data, **kwargs)
python
def move(self, from_path, to_path, **kwargs): """移动单个文件或目录. :param from_path: 源文件/目录在网盘中的路径(包括文件名)。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param to_path: 目标文件/目录在网盘中的路径(包括文件名)。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :return: Response 对象 """ data = { 'from': from_path, 'to': to_path, } return self._request('file', 'move', data=data, **kwargs)
[ "def", "move", "(", "self", ",", "from_path", ",", "to_path", ",", "*", "*", "kwargs", ")", ":", "data", "=", "{", "'from'", ":", "from_path", ",", "'to'", ":", "to_path", ",", "}", "return", "self", ".", "_request", "(", "'file'", ",", "'move'", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
移动单个文件或目录. :param from_path: 源文件/目录在网盘中的路径(包括文件名)。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param to_path: 目标文件/目录在网盘中的路径(包括文件名)。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :return: Response 对象
[ "移动单个文件或目录", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L328-L354
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.multi_move
def multi_move(self, path_list, **kwargs): """批量移动文件或目录. :param path_list: 源文件地址和目标文件地址对列表: >>> path_list = [ ... ('/apps/test_sdk/test.txt', # 源文件 ... '/apps/test_sdk/testmkdir/b.txt' # 目标文件 ... ), ... ('/apps/test_sdk/test.txt', # 源文件 ... '/apps/test_sdk/testmkdir/b.txt' # 目标文件 ... ), ... ] .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :type path_list: list :return: Response 对象 """ data = { 'param': json.dumps({ 'list': [{'from': x[0], 'to': x[1]} for x in path_list] }), } return self._request('file', 'move', data=data, **kwargs)
python
def multi_move(self, path_list, **kwargs): """批量移动文件或目录. :param path_list: 源文件地址和目标文件地址对列表: >>> path_list = [ ... ('/apps/test_sdk/test.txt', # 源文件 ... '/apps/test_sdk/testmkdir/b.txt' # 目标文件 ... ), ... ('/apps/test_sdk/test.txt', # 源文件 ... '/apps/test_sdk/testmkdir/b.txt' # 目标文件 ... ), ... ] .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :type path_list: list :return: Response 对象 """ data = { 'param': json.dumps({ 'list': [{'from': x[0], 'to': x[1]} for x in path_list] }), } return self._request('file', 'move', data=data, **kwargs)
[ "def", "multi_move", "(", "self", ",", "path_list", ",", "*", "*", "kwargs", ")", ":", "data", "=", "{", "'param'", ":", "json", ".", "dumps", "(", "{", "'list'", ":", "[", "{", "'from'", ":", "x", "[", "0", "]", ",", "'to'", ":", "x", "[", "1", "]", "}", "for", "x", "in", "path_list", "]", "}", ")", ",", "}", "return", "self", ".", "_request", "(", "'file'", ",", "'move'", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
批量移动文件或目录. :param path_list: 源文件地址和目标文件地址对列表: >>> path_list = [ ... ('/apps/test_sdk/test.txt', # 源文件 ... '/apps/test_sdk/testmkdir/b.txt' # 目标文件 ... ), ... ('/apps/test_sdk/test.txt', # 源文件 ... '/apps/test_sdk/testmkdir/b.txt' # 目标文件 ... ), ... ] .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :type path_list: list :return: Response 对象
[ "批量移动文件或目录", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L356-L385
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.copy
def copy(self, from_path, to_path, **kwargs): """拷贝文件或目录. :param from_path: 源文件/目录在网盘中的路径(包括文件名)。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param to_path: 目标文件/目录在网盘中的路径(包括文件名)。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :return: Response 对象 .. warning:: ``move`` 操作后,源文件被移动至目标地址; ``copy`` 操作则会保留原文件。 """ data = { 'from': from_path, 'to': to_path, } return self._request('file', 'copy', data=data, **kwargs)
python
def copy(self, from_path, to_path, **kwargs): """拷贝文件或目录. :param from_path: 源文件/目录在网盘中的路径(包括文件名)。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param to_path: 目标文件/目录在网盘中的路径(包括文件名)。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :return: Response 对象 .. warning:: ``move`` 操作后,源文件被移动至目标地址; ``copy`` 操作则会保留原文件。 """ data = { 'from': from_path, 'to': to_path, } return self._request('file', 'copy', data=data, **kwargs)
[ "def", "copy", "(", "self", ",", "from_path", ",", "to_path", ",", "*", "*", "kwargs", ")", ":", "data", "=", "{", "'from'", ":", "from_path", ",", "'to'", ":", "to_path", ",", "}", "return", "self", ".", "_request", "(", "'file'", ",", "'copy'", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
拷贝文件或目录. :param from_path: 源文件/目录在网盘中的路径(包括文件名)。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param to_path: 目标文件/目录在网盘中的路径(包括文件名)。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :return: Response 对象 .. warning:: ``move`` 操作后,源文件被移动至目标地址; ``copy`` 操作则会保留原文件。
[ "拷贝文件或目录", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L387-L417
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.delete
def delete(self, remote_path, **kwargs): """删除单个文件或目录. .. warning:: * 文件/目录删除后默认临时存放在回收站内,删除文件或目录的临时存放 不占用用户的空间配额; * 存放有效期为10天,10天内可还原回原路径下,10天后则永久删除。 :param remote_path: 网盘中文件/目录的路径,路径必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :type remote_path: str :return: Response 对象 """ data = { 'path': remote_path } return self._request('file', 'delete', data=data, **kwargs)
python
def delete(self, remote_path, **kwargs): """删除单个文件或目录. .. warning:: * 文件/目录删除后默认临时存放在回收站内,删除文件或目录的临时存放 不占用用户的空间配额; * 存放有效期为10天,10天内可还原回原路径下,10天后则永久删除。 :param remote_path: 网盘中文件/目录的路径,路径必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :type remote_path: str :return: Response 对象 """ data = { 'path': remote_path } return self._request('file', 'delete', data=data, **kwargs)
[ "def", "delete", "(", "self", ",", "remote_path", ",", "*", "*", "kwargs", ")", ":", "data", "=", "{", "'path'", ":", "remote_path", "}", "return", "self", ".", "_request", "(", "'file'", ",", "'delete'", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
删除单个文件或目录. .. warning:: * 文件/目录删除后默认临时存放在回收站内,删除文件或目录的临时存放 不占用用户的空间配额; * 存放有效期为10天,10天内可还原回原路径下,10天后则永久删除。 :param remote_path: 网盘中文件/目录的路径,路径必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :type remote_path: str :return: Response 对象
[ "删除单个文件或目录", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L450-L473
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.multi_delete
def multi_delete(self, path_list, **kwargs): """批量删除文件或目录. .. warning:: * 文件/目录删除后默认临时存放在回收站内,删除文件或目录的临时存放 不占用用户的空间配额; * 存放有效期为10天,10天内可还原回原路径下,10天后则永久删除。 :param path_list: 网盘中文件/目录的路径列表,路径必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :type path_list: list :return: Response 对象 """ data = { 'param': json.dumps({ 'list': [{'path': path} for path in path_list] }), } return self._request('file', 'delete', data=data, **kwargs)
python
def multi_delete(self, path_list, **kwargs): """批量删除文件或目录. .. warning:: * 文件/目录删除后默认临时存放在回收站内,删除文件或目录的临时存放 不占用用户的空间配额; * 存放有效期为10天,10天内可还原回原路径下,10天后则永久删除。 :param path_list: 网盘中文件/目录的路径列表,路径必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :type path_list: list :return: Response 对象 """ data = { 'param': json.dumps({ 'list': [{'path': path} for path in path_list] }), } return self._request('file', 'delete', data=data, **kwargs)
[ "def", "multi_delete", "(", "self", ",", "path_list", ",", "*", "*", "kwargs", ")", ":", "data", "=", "{", "'param'", ":", "json", ".", "dumps", "(", "{", "'list'", ":", "[", "{", "'path'", ":", "path", "}", "for", "path", "in", "path_list", "]", "}", ")", ",", "}", "return", "self", ".", "_request", "(", "'file'", ",", "'delete'", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
批量删除文件或目录. .. warning:: * 文件/目录删除后默认临时存放在回收站内,删除文件或目录的临时存放 不占用用户的空间配额; * 存放有效期为10天,10天内可还原回原路径下,10天后则永久删除。 :param path_list: 网盘中文件/目录的路径列表,路径必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :type path_list: list :return: Response 对象
[ "批量删除文件或目录", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L475-L500
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.search
def search(self, remote_path, keyword, recurrent='0', **kwargs): """按文件名搜索文件(不支持查找目录). :param remote_path: 需要检索的目录路径,路径必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :type remote_path: str :param keyword: 关键词 :type keyword: str :param recurrent: 是否递归。 * "0"表示不递归 * "1"表示递归 :type recurrent: str :return: Response 对象 """ params = { 'path': remote_path, 'wd': keyword, 're': recurrent, } return self._request('file', 'search', extra_params=params, **kwargs)
python
def search(self, remote_path, keyword, recurrent='0', **kwargs): """按文件名搜索文件(不支持查找目录). :param remote_path: 需要检索的目录路径,路径必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :type remote_path: str :param keyword: 关键词 :type keyword: str :param recurrent: 是否递归。 * "0"表示不递归 * "1"表示递归 :type recurrent: str :return: Response 对象 """ params = { 'path': remote_path, 'wd': keyword, 're': recurrent, } return self._request('file', 'search', extra_params=params, **kwargs)
[ "def", "search", "(", "self", ",", "remote_path", ",", "keyword", ",", "recurrent", "=", "'0'", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'path'", ":", "remote_path", ",", "'wd'", ":", "keyword", ",", "'re'", ":", "recurrent", ",", "}", "return", "self", ".", "_request", "(", "'file'", ",", "'search'", ",", "extra_params", "=", "params", ",", "*", "*", "kwargs", ")" ]
按文件名搜索文件(不支持查找目录). :param remote_path: 需要检索的目录路径,路径必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :type remote_path: str :param keyword: 关键词 :type keyword: str :param recurrent: 是否递归。 * "0"表示不递归 * "1"表示递归 :type recurrent: str :return: Response 对象
[ "按文件名搜索文件(不支持查找目录)", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L502-L529
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.thumbnail
def thumbnail(self, remote_path, height, width, quality=100, **kwargs): """获取指定图片文件的缩略图. :param remote_path: 源图片的路径,路径必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param height: 指定缩略图的高度,取值范围为(0,1600]。 :type height: int :param width: 指定缩略图的宽度,取值范围为(0,1600]。 :type width: int :param quality: 缩略图的质量,默认为100,取值范围(0,100]。 :type quality: int :return: Response 对象 .. warning:: 有以下限制条件: * 原图大小(0, 10M]; * 原图类型: jpg、jpeg、bmp、gif、png; * 目标图类型:和原图的类型有关;例如:原图是gif图片, 则缩略后也为gif图片。 """ params = { 'path': remote_path, 'height': height, 'width': width, 'quality': quality, } return self._request('thumbnail', 'generate', extra_params=params, **kwargs)
python
def thumbnail(self, remote_path, height, width, quality=100, **kwargs): """获取指定图片文件的缩略图. :param remote_path: 源图片的路径,路径必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param height: 指定缩略图的高度,取值范围为(0,1600]。 :type height: int :param width: 指定缩略图的宽度,取值范围为(0,1600]。 :type width: int :param quality: 缩略图的质量,默认为100,取值范围(0,100]。 :type quality: int :return: Response 对象 .. warning:: 有以下限制条件: * 原图大小(0, 10M]; * 原图类型: jpg、jpeg、bmp、gif、png; * 目标图类型:和原图的类型有关;例如:原图是gif图片, 则缩略后也为gif图片。 """ params = { 'path': remote_path, 'height': height, 'width': width, 'quality': quality, } return self._request('thumbnail', 'generate', extra_params=params, **kwargs)
[ "def", "thumbnail", "(", "self", ",", "remote_path", ",", "height", ",", "width", ",", "quality", "=", "100", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'path'", ":", "remote_path", ",", "'height'", ":", "height", ",", "'width'", ":", "width", ",", "'quality'", ":", "quality", ",", "}", "return", "self", ".", "_request", "(", "'thumbnail'", ",", "'generate'", ",", "extra_params", "=", "params", ",", "*", "*", "kwargs", ")" ]
获取指定图片文件的缩略图. :param remote_path: 源图片的路径,路径必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param height: 指定缩略图的高度,取值范围为(0,1600]。 :type height: int :param width: 指定缩略图的宽度,取值范围为(0,1600]。 :type width: int :param quality: 缩略图的质量,默认为100,取值范围(0,100]。 :type quality: int :return: Response 对象 .. warning:: 有以下限制条件: * 原图大小(0, 10M]; * 原图类型: jpg、jpeg、bmp、gif、png; * 目标图类型:和原图的类型有关;例如:原图是gif图片, 则缩略后也为gif图片。
[ "获取指定图片文件的缩略图", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L531-L566
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.diff
def diff(self, cursor='null', **kwargs): """文件增量更新操作查询接口. 本接口有数秒延迟,但保证返回结果为最终一致. :param cursor: 用于标记更新断点。 * 首次调用cursor=null; * 非首次调用,使用最后一次调用diff接口的返回结果 中的cursor。 :type cursor: str :return: Response 对象 """ params = { 'cursor': cursor, } return self._request('file', 'diff', extra_params=params, **kwargs)
python
def diff(self, cursor='null', **kwargs): """文件增量更新操作查询接口. 本接口有数秒延迟,但保证返回结果为最终一致. :param cursor: 用于标记更新断点。 * 首次调用cursor=null; * 非首次调用,使用最后一次调用diff接口的返回结果 中的cursor。 :type cursor: str :return: Response 对象 """ params = { 'cursor': cursor, } return self._request('file', 'diff', extra_params=params, **kwargs)
[ "def", "diff", "(", "self", ",", "cursor", "=", "'null'", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'cursor'", ":", "cursor", ",", "}", "return", "self", ".", "_request", "(", "'file'", ",", "'diff'", ",", "extra_params", "=", "params", ",", "*", "*", "kwargs", ")" ]
文件增量更新操作查询接口. 本接口有数秒延迟,但保证返回结果为最终一致. :param cursor: 用于标记更新断点。 * 首次调用cursor=null; * 非首次调用,使用最后一次调用diff接口的返回结果 中的cursor。 :type cursor: str :return: Response 对象
[ "文件增量更新操作查询接口", ".", "本接口有数秒延迟,但保证返回结果为最终一致", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L568-L584
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.video_convert
def video_convert(self, remote_path, video_type, **kwargs): """对视频文件进行转码,实现实时观看视频功能. 可下载支持 HLS/M3U8 的 `媒体云播放器 SDK <HLSSDK_>`__ 配合使用. .. _HLSSDK: http://developer.baidu.com/wiki/index.php?title=docs/cplat/media/sdk :param remote_path: 需要下载的视频文件路径,以/开头的绝对路径, 需含源文件的文件名。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :type remote_path: str :param video_type: 目前支持以下格式: M3U8_320_240、M3U8_480_224、M3U8_480_360、 M3U8_640_480和M3U8_854_480 :type video_type: str :return: Response 对象 .. warning:: 目前这个接口支持的源文件格式如下: +--------------------------+------------+--------------------------+ |格式名称 |扩展名 |备注 | +==========================+============+==========================+ |Apple HTTP Live Streaming |m3u8/m3u |iOS支持的视频格式 | +--------------------------+------------+--------------------------+ |ASF |asf |视频格式 | +--------------------------+------------+--------------------------+ |AVI |avi |视频格式 | +--------------------------+------------+--------------------------+ |Flash Video (FLV) |flv |Macromedia Flash视频格式 | +--------------------------+------------+--------------------------+ |GIF Animation |gif |视频格式 | +--------------------------+------------+--------------------------+ |Matroska |mkv |Matroska/WebM视频格式 | +--------------------------+------------+--------------------------+ |MOV/QuickTime/MP4 |mov/mp4/m4a/|支持3GP、3GP2、PSP、iPod | | |3gp/3g2/mj2 |之类视频格式 | +--------------------------+------------+--------------------------+ |MPEG-PS (program stream) |mpeg |也就是VOB文件/SVCD/DVD格式| +--------------------------+------------+--------------------------+ |MPEG-TS (transport stream)|ts | 即DVB传输流 | +--------------------------+------------+--------------------------+ |RealMedia |rm/rmvb | Real视频格式 | +--------------------------+------------+--------------------------+ |WebM |webm | Html视频格式 | +--------------------------+------------+--------------------------+ """ params = { 'path': remote_path, 'type': 
video_type, } return self._request('file', 'streaming', extra_params=params, **kwargs)
python
def video_convert(self, remote_path, video_type, **kwargs): """对视频文件进行转码,实现实时观看视频功能. 可下载支持 HLS/M3U8 的 `媒体云播放器 SDK <HLSSDK_>`__ 配合使用. .. _HLSSDK: http://developer.baidu.com/wiki/index.php?title=docs/cplat/media/sdk :param remote_path: 需要下载的视频文件路径,以/开头的绝对路径, 需含源文件的文件名。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :type remote_path: str :param video_type: 目前支持以下格式: M3U8_320_240、M3U8_480_224、M3U8_480_360、 M3U8_640_480和M3U8_854_480 :type video_type: str :return: Response 对象 .. warning:: 目前这个接口支持的源文件格式如下: +--------------------------+------------+--------------------------+ |格式名称 |扩展名 |备注 | +==========================+============+==========================+ |Apple HTTP Live Streaming |m3u8/m3u |iOS支持的视频格式 | +--------------------------+------------+--------------------------+ |ASF |asf |视频格式 | +--------------------------+------------+--------------------------+ |AVI |avi |视频格式 | +--------------------------+------------+--------------------------+ |Flash Video (FLV) |flv |Macromedia Flash视频格式 | +--------------------------+------------+--------------------------+ |GIF Animation |gif |视频格式 | +--------------------------+------------+--------------------------+ |Matroska |mkv |Matroska/WebM视频格式 | +--------------------------+------------+--------------------------+ |MOV/QuickTime/MP4 |mov/mp4/m4a/|支持3GP、3GP2、PSP、iPod | | |3gp/3g2/mj2 |之类视频格式 | +--------------------------+------------+--------------------------+ |MPEG-PS (program stream) |mpeg |也就是VOB文件/SVCD/DVD格式| +--------------------------+------------+--------------------------+ |MPEG-TS (transport stream)|ts | 即DVB传输流 | +--------------------------+------------+--------------------------+ |RealMedia |rm/rmvb | Real视频格式 | +--------------------------+------------+--------------------------+ |WebM |webm | Html视频格式 | +--------------------------+------------+--------------------------+ """ params = { 'path': remote_path, 'type': 
video_type, } return self._request('file', 'streaming', extra_params=params, **kwargs)
[ "def", "video_convert", "(", "self", ",", "remote_path", ",", "video_type", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'path'", ":", "remote_path", ",", "'type'", ":", "video_type", ",", "}", "return", "self", ".", "_request", "(", "'file'", ",", "'streaming'", ",", "extra_params", "=", "params", ",", "*", "*", "kwargs", ")" ]
对视频文件进行转码,实现实时观看视频功能. 可下载支持 HLS/M3U8 的 `媒体云播放器 SDK <HLSSDK_>`__ 配合使用. .. _HLSSDK: http://developer.baidu.com/wiki/index.php?title=docs/cplat/media/sdk :param remote_path: 需要下载的视频文件路径,以/开头的绝对路径, 需含源文件的文件名。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :type remote_path: str :param video_type: 目前支持以下格式: M3U8_320_240、M3U8_480_224、M3U8_480_360、 M3U8_640_480和M3U8_854_480 :type video_type: str :return: Response 对象 .. warning:: 目前这个接口支持的源文件格式如下: +--------------------------+------------+--------------------------+ |格式名称 |扩展名 |备注 | +==========================+============+==========================+ |Apple HTTP Live Streaming |m3u8/m3u |iOS支持的视频格式 | +--------------------------+------------+--------------------------+ |ASF |asf |视频格式 | +--------------------------+------------+--------------------------+ |AVI |avi |视频格式 | +--------------------------+------------+--------------------------+ |Flash Video (FLV) |flv |Macromedia Flash视频格式 | +--------------------------+------------+--------------------------+ |GIF Animation |gif |视频格式 | +--------------------------+------------+--------------------------+ |Matroska |mkv |Matroska/WebM视频格式 | +--------------------------+------------+--------------------------+ |MOV/QuickTime/MP4 |mov/mp4/m4a/|支持3GP、3GP2、PSP、iPod | | |3gp/3g2/mj2 |之类视频格式 | +--------------------------+------------+--------------------------+ |MPEG-PS (program stream) |mpeg |也就是VOB文件/SVCD/DVD格式| +--------------------------+------------+--------------------------+ |MPEG-TS (transport stream)|ts | 即DVB传输流 | +--------------------------+------------+--------------------------+ |RealMedia |rm/rmvb | Real视频格式 | +--------------------------+------------+--------------------------+ |WebM |webm | Html视频格式 | +--------------------------+------------+--------------------------+
[ "对视频文件进行转码,实现实时观看视频功能", ".", "可下载支持", "HLS", "/", "M3U8", "的", "媒体云播放器", "SDK", "<HLSSDK_", ">", "__", "配合使用", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L586-L645
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.list_streams
def list_streams(self, file_type, start=0, limit=100, filter_path=None, **kwargs): """以视频、音频、图片及文档四种类型的视图获取所创建应用程序下的 文件列表. :param file_type: 类型分为video、audio、image及doc四种。 :param start: 返回条目控制起始值,缺省值为0。 :param limit: 返回条目控制长度,缺省为1000,可配置。 :param filter_path: 需要过滤的前缀路径,如:/apps/album .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :return: Response 对象 """ params = { 'type': file_type, 'start': start, 'limit': limit, 'filter_path': filter_path, } return self._request('stream', 'list', extra_params=params, **kwargs)
python
def list_streams(self, file_type, start=0, limit=100, filter_path=None, **kwargs): """以视频、音频、图片及文档四种类型的视图获取所创建应用程序下的 文件列表. :param file_type: 类型分为video、audio、image及doc四种。 :param start: 返回条目控制起始值,缺省值为0。 :param limit: 返回条目控制长度,缺省为1000,可配置。 :param filter_path: 需要过滤的前缀路径,如:/apps/album .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :return: Response 对象 """ params = { 'type': file_type, 'start': start, 'limit': limit, 'filter_path': filter_path, } return self._request('stream', 'list', extra_params=params, **kwargs)
[ "def", "list_streams", "(", "self", ",", "file_type", ",", "start", "=", "0", ",", "limit", "=", "100", ",", "filter_path", "=", "None", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'type'", ":", "file_type", ",", "'start'", ":", "start", ",", "'limit'", ":", "limit", ",", "'filter_path'", ":", "filter_path", ",", "}", "return", "self", ".", "_request", "(", "'stream'", ",", "'list'", ",", "extra_params", "=", "params", ",", "*", "*", "kwargs", ")" ]
以视频、音频、图片及文档四种类型的视图获取所创建应用程序下的 文件列表. :param file_type: 类型分为video、audio、image及doc四种。 :param start: 返回条目控制起始值,缺省值为0。 :param limit: 返回条目控制长度,缺省为1000,可配置。 :param filter_path: 需要过滤的前缀路径,如:/apps/album .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :return: Response 对象
[ "以视频、音频、图片及文档四种类型的视图获取所创建应用程序下的", "文件列表", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L647-L673
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.download_stream
def download_stream(self, remote_path, **kwargs): """为当前用户下载一个流式文件.其参数和返回结果与下载单个文件的相同. :param remote_path: 需要下载的文件路径,以/开头的绝对路径,含文件名。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :return: Response 对象 """ params = { 'path': remote_path, } url = 'https://d.pcs.baidu.com/rest/2.0/pcs/file' return self._request('stream', 'download', url=url, extra_params=params, **kwargs)
python
def download_stream(self, remote_path, **kwargs): """为当前用户下载一个流式文件.其参数和返回结果与下载单个文件的相同. :param remote_path: 需要下载的文件路径,以/开头的绝对路径,含文件名。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :return: Response 对象 """ params = { 'path': remote_path, } url = 'https://d.pcs.baidu.com/rest/2.0/pcs/file' return self._request('stream', 'download', url=url, extra_params=params, **kwargs)
[ "def", "download_stream", "(", "self", ",", "remote_path", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'path'", ":", "remote_path", ",", "}", "url", "=", "'https://d.pcs.baidu.com/rest/2.0/pcs/file'", "return", "self", ".", "_request", "(", "'stream'", ",", "'download'", ",", "url", "=", "url", ",", "extra_params", "=", "params", ",", "*", "*", "kwargs", ")" ]
为当前用户下载一个流式文件.其参数和返回结果与下载单个文件的相同. :param remote_path: 需要下载的文件路径,以/开头的绝对路径,含文件名。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :return: Response 对象
[ "为当前用户下载一个流式文件", ".", "其参数和返回结果与下载单个文件的相同", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L675-L694
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.rapid_upload
def rapid_upload(self, remote_path, content_length, content_md5, content_crc32, slice_md5, ondup=None, **kwargs): """秒传一个文件. .. warning:: * 被秒传文件必须大于256KB(即 256*1024 B)。 * 校验段为文件的前256KB,秒传接口需要提供校验段的MD5。 (非强一致接口,上传后请等待1秒后再读取) :param remote_path: 上传文件的全路径名。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param content_length: 待秒传文件的长度。 :param content_md5: 待秒传文件的MD5。 :param content_crc32: 待秒传文件的CRC32。 :param slice_md5: 待秒传文件校验段的MD5。 :param ondup: (可选) * 'overwrite':表示覆盖同名文件; * 'newcopy':表示生成文件副本并进行重命名,命名规则为“ 文件名_日期.后缀”。 :return: Response 对象 """ data = { 'path': remote_path, 'content-length': content_length, 'content-md5': content_md5, 'content-crc32': content_crc32, 'slice-md5': slice_md5, 'ondup': ondup, } return self._request('file', 'rapidupload', data=data, **kwargs)
python
def rapid_upload(self, remote_path, content_length, content_md5, content_crc32, slice_md5, ondup=None, **kwargs): """秒传一个文件. .. warning:: * 被秒传文件必须大于256KB(即 256*1024 B)。 * 校验段为文件的前256KB,秒传接口需要提供校验段的MD5。 (非强一致接口,上传后请等待1秒后再读取) :param remote_path: 上传文件的全路径名。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param content_length: 待秒传文件的长度。 :param content_md5: 待秒传文件的MD5。 :param content_crc32: 待秒传文件的CRC32。 :param slice_md5: 待秒传文件校验段的MD5。 :param ondup: (可选) * 'overwrite':表示覆盖同名文件; * 'newcopy':表示生成文件副本并进行重命名,命名规则为“ 文件名_日期.后缀”。 :return: Response 对象 """ data = { 'path': remote_path, 'content-length': content_length, 'content-md5': content_md5, 'content-crc32': content_crc32, 'slice-md5': slice_md5, 'ondup': ondup, } return self._request('file', 'rapidupload', data=data, **kwargs)
[ "def", "rapid_upload", "(", "self", ",", "remote_path", ",", "content_length", ",", "content_md5", ",", "content_crc32", ",", "slice_md5", ",", "ondup", "=", "None", ",", "*", "*", "kwargs", ")", ":", "data", "=", "{", "'path'", ":", "remote_path", ",", "'content-length'", ":", "content_length", ",", "'content-md5'", ":", "content_md5", ",", "'content-crc32'", ":", "content_crc32", ",", "'slice-md5'", ":", "slice_md5", ",", "'ondup'", ":", "ondup", ",", "}", "return", "self", ".", "_request", "(", "'file'", ",", "'rapidupload'", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
秒传一个文件. .. warning:: * 被秒传文件必须大于256KB(即 256*1024 B)。 * 校验段为文件的前256KB,秒传接口需要提供校验段的MD5。 (非强一致接口,上传后请等待1秒后再读取) :param remote_path: 上传文件的全路径名。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param content_length: 待秒传文件的长度。 :param content_md5: 待秒传文件的MD5。 :param content_crc32: 待秒传文件的CRC32。 :param slice_md5: 待秒传文件校验段的MD5。 :param ondup: (可选) * 'overwrite':表示覆盖同名文件; * 'newcopy':表示生成文件副本并进行重命名,命名规则为“ 文件名_日期.后缀”。 :return: Response 对象
[ "秒传一个文件", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L696-L732
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.add_download_task
def add_download_task(self, source_url, remote_path, rate_limit=None, timeout=60 * 60, expires=None, callback='', **kwargs): """添加离线下载任务,实现单个文件离线下载. :param source_url: 源文件的URL。 :param remote_path: 下载后的文件保存路径。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param rate_limit: 下载限速,默认不限速。 :type rate_limit: int or long :param timeout: 下载超时时间,默认3600秒。 :param expires: 请求失效时间,如果有,则会校验。 :type expires: int :param callback: 下载完毕后的回调,默认为空。 :type callback: str :return: Response 对象 """ data = { 'source_url': source_url, 'save_path': remote_path, 'expires': expires, 'rate_limit': rate_limit, 'timeout': timeout, 'callback': callback, } return self._request('services/cloud_dl', 'add_task', data=data, **kwargs)
python
def add_download_task(self, source_url, remote_path, rate_limit=None, timeout=60 * 60, expires=None, callback='', **kwargs): """添加离线下载任务,实现单个文件离线下载. :param source_url: 源文件的URL。 :param remote_path: 下载后的文件保存路径。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param rate_limit: 下载限速,默认不限速。 :type rate_limit: int or long :param timeout: 下载超时时间,默认3600秒。 :param expires: 请求失效时间,如果有,则会校验。 :type expires: int :param callback: 下载完毕后的回调,默认为空。 :type callback: str :return: Response 对象 """ data = { 'source_url': source_url, 'save_path': remote_path, 'expires': expires, 'rate_limit': rate_limit, 'timeout': timeout, 'callback': callback, } return self._request('services/cloud_dl', 'add_task', data=data, **kwargs)
[ "def", "add_download_task", "(", "self", ",", "source_url", ",", "remote_path", ",", "rate_limit", "=", "None", ",", "timeout", "=", "60", "*", "60", ",", "expires", "=", "None", ",", "callback", "=", "''", ",", "*", "*", "kwargs", ")", ":", "data", "=", "{", "'source_url'", ":", "source_url", ",", "'save_path'", ":", "remote_path", ",", "'expires'", ":", "expires", ",", "'rate_limit'", ":", "rate_limit", ",", "'timeout'", ":", "timeout", ",", "'callback'", ":", "callback", ",", "}", "return", "self", ".", "_request", "(", "'services/cloud_dl'", ",", "'add_task'", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
添加离线下载任务,实现单个文件离线下载. :param source_url: 源文件的URL。 :param remote_path: 下载后的文件保存路径。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param rate_limit: 下载限速,默认不限速。 :type rate_limit: int or long :param timeout: 下载超时时间,默认3600秒。 :param expires: 请求失效时间,如果有,则会校验。 :type expires: int :param callback: 下载完毕后的回调,默认为空。 :type callback: str :return: Response 对象
[ "添加离线下载任务,实现单个文件离线下载", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L734-L767
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.query_download_tasks
def query_download_tasks(self, task_ids, operate_type=1, expires=None, **kwargs): """根据任务ID号,查询离线下载任务信息及进度信息。 :param task_ids: 要查询的任务ID列表 :type task_ids: list or tuple :param operate_type: * 0:查任务信息 * 1:查进度信息,默认为1 :param expires: 请求失效时间,如果有,则会校验。 :type expires: int :return: Response 对象 """ params = { 'task_ids': ','.join(map(str, task_ids)), 'op_type': operate_type, 'expires': expires, } return self._request('services/cloud_dl', 'query_task', extra_params=params, **kwargs)
python
def query_download_tasks(self, task_ids, operate_type=1, expires=None, **kwargs): """根据任务ID号,查询离线下载任务信息及进度信息。 :param task_ids: 要查询的任务ID列表 :type task_ids: list or tuple :param operate_type: * 0:查任务信息 * 1:查进度信息,默认为1 :param expires: 请求失效时间,如果有,则会校验。 :type expires: int :return: Response 对象 """ params = { 'task_ids': ','.join(map(str, task_ids)), 'op_type': operate_type, 'expires': expires, } return self._request('services/cloud_dl', 'query_task', extra_params=params, **kwargs)
[ "def", "query_download_tasks", "(", "self", ",", "task_ids", ",", "operate_type", "=", "1", ",", "expires", "=", "None", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'task_ids'", ":", "','", ".", "join", "(", "map", "(", "str", ",", "task_ids", ")", ")", ",", "'op_type'", ":", "operate_type", ",", "'expires'", ":", "expires", ",", "}", "return", "self", ".", "_request", "(", "'services/cloud_dl'", ",", "'query_task'", ",", "extra_params", "=", "params", ",", "*", "*", "kwargs", ")" ]
根据任务ID号,查询离线下载任务信息及进度信息。 :param task_ids: 要查询的任务ID列表 :type task_ids: list or tuple :param operate_type: * 0:查任务信息 * 1:查进度信息,默认为1 :param expires: 请求失效时间,如果有,则会校验。 :type expires: int :return: Response 对象
[ "根据任务ID号,查询离线下载任务信息及进度信息。" ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L769-L789
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.list_download_tasks
def list_download_tasks(self, need_task_info=1, start=0, limit=10, asc=0, create_time=None, status=None, source_url=None, remote_path=None, expires=None, **kwargs): """查询离线下载任务ID列表及任务信息. :param need_task_info: 是否需要返回任务信息: * 0:不需要 * 1:需要,默认为1 :param start: 查询任务起始位置,默认为0。 :param limit: 设定返回任务数量,默认为10。 :param asc: * 0:降序,默认值 * 1:升序 :param create_time: 任务创建时间,默认为空。 :type create_time: int :param status: 任务状态,默认为空。 0:下载成功,1:下载进行中 2:系统错误,3:资源不存在, 4:下载超时,5:资源存在但下载失败, 6:存储空间不足, 7:目标地址数据已存在, 8:任务取消. :type status: int :param source_url: 源地址URL,默认为空。 :param remote_path: 文件保存路径,默认为空。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param expires: 请求失效时间,如果有,则会校验。 :type expires: int :return: Response 对象 """ data = { 'expires': expires, 'start': start, 'limit': limit, 'asc': asc, 'source_url': source_url, 'save_path': remote_path, 'create_time': create_time, 'status': status, 'need_task_info': need_task_info, } return self._request('services/cloud_dl', 'list_task', data=data, **kwargs)
python
def list_download_tasks(self, need_task_info=1, start=0, limit=10, asc=0, create_time=None, status=None, source_url=None, remote_path=None, expires=None, **kwargs): """查询离线下载任务ID列表及任务信息. :param need_task_info: 是否需要返回任务信息: * 0:不需要 * 1:需要,默认为1 :param start: 查询任务起始位置,默认为0。 :param limit: 设定返回任务数量,默认为10。 :param asc: * 0:降序,默认值 * 1:升序 :param create_time: 任务创建时间,默认为空。 :type create_time: int :param status: 任务状态,默认为空。 0:下载成功,1:下载进行中 2:系统错误,3:资源不存在, 4:下载超时,5:资源存在但下载失败, 6:存储空间不足, 7:目标地址数据已存在, 8:任务取消. :type status: int :param source_url: 源地址URL,默认为空。 :param remote_path: 文件保存路径,默认为空。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param expires: 请求失效时间,如果有,则会校验。 :type expires: int :return: Response 对象 """ data = { 'expires': expires, 'start': start, 'limit': limit, 'asc': asc, 'source_url': source_url, 'save_path': remote_path, 'create_time': create_time, 'status': status, 'need_task_info': need_task_info, } return self._request('services/cloud_dl', 'list_task', data=data, **kwargs)
[ "def", "list_download_tasks", "(", "self", ",", "need_task_info", "=", "1", ",", "start", "=", "0", ",", "limit", "=", "10", ",", "asc", "=", "0", ",", "create_time", "=", "None", ",", "status", "=", "None", ",", "source_url", "=", "None", ",", "remote_path", "=", "None", ",", "expires", "=", "None", ",", "*", "*", "kwargs", ")", ":", "data", "=", "{", "'expires'", ":", "expires", ",", "'start'", ":", "start", ",", "'limit'", ":", "limit", ",", "'asc'", ":", "asc", ",", "'source_url'", ":", "source_url", ",", "'save_path'", ":", "remote_path", ",", "'create_time'", ":", "create_time", ",", "'status'", ":", "status", ",", "'need_task_info'", ":", "need_task_info", ",", "}", "return", "self", ".", "_request", "(", "'services/cloud_dl'", ",", "'list_task'", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
查询离线下载任务ID列表及任务信息. :param need_task_info: 是否需要返回任务信息: * 0:不需要 * 1:需要,默认为1 :param start: 查询任务起始位置,默认为0。 :param limit: 设定返回任务数量,默认为10。 :param asc: * 0:降序,默认值 * 1:升序 :param create_time: 任务创建时间,默认为空。 :type create_time: int :param status: 任务状态,默认为空。 0:下载成功,1:下载进行中 2:系统错误,3:资源不存在, 4:下载超时,5:资源存在但下载失败, 6:存储空间不足, 7:目标地址数据已存在, 8:任务取消. :type status: int :param source_url: 源地址URL,默认为空。 :param remote_path: 文件保存路径,默认为空。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param expires: 请求失效时间,如果有,则会校验。 :type expires: int :return: Response 对象
[ "查询离线下载任务ID列表及任务信息", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L791-L839
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.cancel_download_task
def cancel_download_task(self, task_id, expires=None, **kwargs): """取消离线下载任务. :param task_id: 要取消的任务ID号。 :type task_id: str :param expires: 请求失效时间,如果有,则会校验。 :type expires: int :return: Response 对象 """ data = { 'expires': expires, 'task_id': task_id, } return self._request('services/cloud_dl', 'cancle_task', data=data, **kwargs)
python
def cancel_download_task(self, task_id, expires=None, **kwargs): """取消离线下载任务. :param task_id: 要取消的任务ID号。 :type task_id: str :param expires: 请求失效时间,如果有,则会校验。 :type expires: int :return: Response 对象 """ data = { 'expires': expires, 'task_id': task_id, } return self._request('services/cloud_dl', 'cancle_task', data=data, **kwargs)
[ "def", "cancel_download_task", "(", "self", ",", "task_id", ",", "expires", "=", "None", ",", "*", "*", "kwargs", ")", ":", "data", "=", "{", "'expires'", ":", "expires", ",", "'task_id'", ":", "task_id", ",", "}", "return", "self", ".", "_request", "(", "'services/cloud_dl'", ",", "'cancle_task'", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
取消离线下载任务. :param task_id: 要取消的任务ID号。 :type task_id: str :param expires: 请求失效时间,如果有,则会校验。 :type expires: int :return: Response 对象
[ "取消离线下载任务", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L841-L856
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.list_recycle_bin
def list_recycle_bin(self, start=0, limit=1000, **kwargs): """获取回收站中的文件及目录列表. :param start: 返回条目的起始值,缺省值为0 :param limit: 返回条目的长度,缺省值为1000 :return: Response 对象 """ params = { 'start': start, 'limit': limit, } return self._request('file', 'listrecycle', extra_params=params, **kwargs)
python
def list_recycle_bin(self, start=0, limit=1000, **kwargs): """获取回收站中的文件及目录列表. :param start: 返回条目的起始值,缺省值为0 :param limit: 返回条目的长度,缺省值为1000 :return: Response 对象 """ params = { 'start': start, 'limit': limit, } return self._request('file', 'listrecycle', extra_params=params, **kwargs)
[ "def", "list_recycle_bin", "(", "self", ",", "start", "=", "0", ",", "limit", "=", "1000", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'start'", ":", "start", ",", "'limit'", ":", "limit", ",", "}", "return", "self", ".", "_request", "(", "'file'", ",", "'listrecycle'", ",", "extra_params", "=", "params", ",", "*", "*", "kwargs", ")" ]
获取回收站中的文件及目录列表. :param start: 返回条目的起始值,缺省值为0 :param limit: 返回条目的长度,缺省值为1000 :return: Response 对象
[ "获取回收站中的文件及目录列表", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L858-L871
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.restore_recycle_bin
def restore_recycle_bin(self, fs_id, **kwargs): """还原单个文件或目录(非强一致接口,调用后请sleep 1秒读取). :param fs_id: 所还原的文件或目录在PCS的临时唯一标识ID。 :type fs_id: str :return: Response 对象 """ data = { 'fs_id': fs_id, } return self._request('file', 'restore', data=data, **kwargs)
python
def restore_recycle_bin(self, fs_id, **kwargs): """还原单个文件或目录(非强一致接口,调用后请sleep 1秒读取). :param fs_id: 所还原的文件或目录在PCS的临时唯一标识ID。 :type fs_id: str :return: Response 对象 """ data = { 'fs_id': fs_id, } return self._request('file', 'restore', data=data, **kwargs)
[ "def", "restore_recycle_bin", "(", "self", ",", "fs_id", ",", "*", "*", "kwargs", ")", ":", "data", "=", "{", "'fs_id'", ":", "fs_id", ",", "}", "return", "self", ".", "_request", "(", "'file'", ",", "'restore'", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
还原单个文件或目录(非强一致接口,调用后请sleep 1秒读取). :param fs_id: 所还原的文件或目录在PCS的临时唯一标识ID。 :type fs_id: str :return: Response 对象
[ "还原单个文件或目录(非强一致接口,调用后请sleep", "1秒读取)", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L873-L884
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
PCS.multi_restore_recycle_bin
def multi_restore_recycle_bin(self, fs_ids, **kwargs): """批量还原文件或目录(非强一致接口,调用后请sleep1秒 ). :param fs_ids: 所还原的文件或目录在 PCS 的临时唯一标识 ID 的列表。 :type fs_ids: list or tuple :return: Response 对象 """ data = { 'param': json.dumps({ 'list': [{'fs_id': fs_id} for fs_id in fs_ids] }), } return self._request('file', 'restore', data=data, **kwargs)
python
def multi_restore_recycle_bin(self, fs_ids, **kwargs): """批量还原文件或目录(非强一致接口,调用后请sleep1秒 ). :param fs_ids: 所还原的文件或目录在 PCS 的临时唯一标识 ID 的列表。 :type fs_ids: list or tuple :return: Response 对象 """ data = { 'param': json.dumps({ 'list': [{'fs_id': fs_id} for fs_id in fs_ids] }), } return self._request('file', 'restore', data=data, **kwargs)
[ "def", "multi_restore_recycle_bin", "(", "self", ",", "fs_ids", ",", "*", "*", "kwargs", ")", ":", "data", "=", "{", "'param'", ":", "json", ".", "dumps", "(", "{", "'list'", ":", "[", "{", "'fs_id'", ":", "fs_id", "}", "for", "fs_id", "in", "fs_ids", "]", "}", ")", ",", "}", "return", "self", ".", "_request", "(", "'file'", ",", "'restore'", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
批量还原文件或目录(非强一致接口,调用后请sleep1秒 ). :param fs_ids: 所还原的文件或目录在 PCS 的临时唯一标识 ID 的列表。 :type fs_ids: list or tuple :return: Response 对象
[ "批量还原文件或目录(非强一致接口,调用后请sleep1秒", ")", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L886-L899
mozillazg/baidu-pcs-python-sdk
baidupcs/tools.py
get_new_access_token
def get_new_access_token(refresh_token, client_id, client_secret, scope=None, **kwargs): """使用 Refresh Token 刷新以获得新的 Access Token. :param refresh_token: 用于刷新 Access Token 用的 Refresh Token; :param client_id: 应用的 API Key; :param client_secret: 应用的 Secret Key; :param scope: 以空格分隔的权限列表,若不传递此参数,代表请求的数据访问 操作权限与上次获取 Access Token 时一致。通过 Refresh Token 刷新 Access Token 时所要求的 scope 权限范围必须小于等于上次 获取 Access Token 时授予的权限范围。 关于权限的具体信息请参考 “ `权限列表`__ ”。 :return: Response 对象 关于 ``response.json()`` 字典的内容所代表的含义, 请参考 `相关的百度帮助文档`__ 。 __ http://developer.baidu.com/wiki/index.php?title=docs/oauth/baiduoauth/list __ http://developer.baidu.com/wiki/index.php?title=docs/oauth/refresh """ data = { 'grant_type': 'refresh_token', 'refresh_token': refresh_token, 'client_id': client_id, 'client_secret': client_secret, } if scope: data['scope'] = scope url = 'https://openapi.baidu.com/oauth/2.0/token' return requests.post(url, data=data)
python
def get_new_access_token(refresh_token, client_id, client_secret, scope=None, **kwargs): """使用 Refresh Token 刷新以获得新的 Access Token. :param refresh_token: 用于刷新 Access Token 用的 Refresh Token; :param client_id: 应用的 API Key; :param client_secret: 应用的 Secret Key; :param scope: 以空格分隔的权限列表,若不传递此参数,代表请求的数据访问 操作权限与上次获取 Access Token 时一致。通过 Refresh Token 刷新 Access Token 时所要求的 scope 权限范围必须小于等于上次 获取 Access Token 时授予的权限范围。 关于权限的具体信息请参考 “ `权限列表`__ ”。 :return: Response 对象 关于 ``response.json()`` 字典的内容所代表的含义, 请参考 `相关的百度帮助文档`__ 。 __ http://developer.baidu.com/wiki/index.php?title=docs/oauth/baiduoauth/list __ http://developer.baidu.com/wiki/index.php?title=docs/oauth/refresh """ data = { 'grant_type': 'refresh_token', 'refresh_token': refresh_token, 'client_id': client_id, 'client_secret': client_secret, } if scope: data['scope'] = scope url = 'https://openapi.baidu.com/oauth/2.0/token' return requests.post(url, data=data)
[ "def", "get_new_access_token", "(", "refresh_token", ",", "client_id", ",", "client_secret", ",", "scope", "=", "None", ",", "*", "*", "kwargs", ")", ":", "data", "=", "{", "'grant_type'", ":", "'refresh_token'", ",", "'refresh_token'", ":", "refresh_token", ",", "'client_id'", ":", "client_id", ",", "'client_secret'", ":", "client_secret", ",", "}", "if", "scope", ":", "data", "[", "'scope'", "]", "=", "scope", "url", "=", "'https://openapi.baidu.com/oauth/2.0/token'", "return", "requests", ".", "post", "(", "url", ",", "data", "=", "data", ")" ]
使用 Refresh Token 刷新以获得新的 Access Token. :param refresh_token: 用于刷新 Access Token 用的 Refresh Token; :param client_id: 应用的 API Key; :param client_secret: 应用的 Secret Key; :param scope: 以空格分隔的权限列表,若不传递此参数,代表请求的数据访问 操作权限与上次获取 Access Token 时一致。通过 Refresh Token 刷新 Access Token 时所要求的 scope 权限范围必须小于等于上次 获取 Access Token 时授予的权限范围。 关于权限的具体信息请参考 “ `权限列表`__ ”。 :return: Response 对象 关于 ``response.json()`` 字典的内容所代表的含义, 请参考 `相关的百度帮助文档`__ 。 __ http://developer.baidu.com/wiki/index.php?title=docs/oauth/baiduoauth/list __ http://developer.baidu.com/wiki/index.php?title=docs/oauth/refresh
[ "使用", "Refresh", "Token", "刷新以获得新的", "Access", "Token", "." ]
train
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/tools.py#L7-L36
persandstrom/python-verisure
verisure/session.py
Session.login
def login(self): """ Login to verisure app api Login before calling any read or write commands """ if os.path.exists(self._cookieFileName): with open(self._cookieFileName, 'r') as cookieFile: self._vid = cookieFile.read().strip() try: self._get_installations() except ResponseError: self._vid = None os.remove(self._cookieFileName) if self._vid is None: self._create_cookie() with open(self._cookieFileName, 'w') as cookieFile: cookieFile.write(self._vid) self._get_installations() self._giid = self.installations[0]['giid']
python
def login(self): """ Login to verisure app api Login before calling any read or write commands """ if os.path.exists(self._cookieFileName): with open(self._cookieFileName, 'r') as cookieFile: self._vid = cookieFile.read().strip() try: self._get_installations() except ResponseError: self._vid = None os.remove(self._cookieFileName) if self._vid is None: self._create_cookie() with open(self._cookieFileName, 'w') as cookieFile: cookieFile.write(self._vid) self._get_installations() self._giid = self.installations[0]['giid']
[ "def", "login", "(", "self", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "_cookieFileName", ")", ":", "with", "open", "(", "self", ".", "_cookieFileName", ",", "'r'", ")", "as", "cookieFile", ":", "self", ".", "_vid", "=", "cookieFile", ".", "read", "(", ")", ".", "strip", "(", ")", "try", ":", "self", ".", "_get_installations", "(", ")", "except", "ResponseError", ":", "self", ".", "_vid", "=", "None", "os", ".", "remove", "(", "self", ".", "_cookieFileName", ")", "if", "self", ".", "_vid", "is", "None", ":", "self", ".", "_create_cookie", "(", ")", "with", "open", "(", "self", ".", "_cookieFileName", ",", "'w'", ")", "as", "cookieFile", ":", "cookieFile", ".", "write", "(", "self", ".", "_vid", ")", "self", ".", "_get_installations", "(", ")", "self", ".", "_giid", "=", "self", ".", "installations", "[", "0", "]", "[", "'giid'", "]" ]
Login to verisure app api Login before calling any read or write commands
[ "Login", "to", "verisure", "app", "api" ]
train
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L73-L95
persandstrom/python-verisure
verisure/session.py
Session._get_installations
def _get_installations(self): """ Get information about installations """ response = None for base_url in urls.BASE_URLS: urls.BASE_URL = base_url try: response = requests.get( urls.get_installations(self._username), headers={ 'Cookie': 'vid={}'.format(self._vid), 'Accept': 'application/json,' 'text/javascript, */*; q=0.01', }) if 2 == response.status_code // 100: break elif 503 == response.status_code: continue else: raise ResponseError(response.status_code, response.text) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) self.installations = json.loads(response.text)
python
def _get_installations(self): """ Get information about installations """ response = None for base_url in urls.BASE_URLS: urls.BASE_URL = base_url try: response = requests.get( urls.get_installations(self._username), headers={ 'Cookie': 'vid={}'.format(self._vid), 'Accept': 'application/json,' 'text/javascript, */*; q=0.01', }) if 2 == response.status_code // 100: break elif 503 == response.status_code: continue else: raise ResponseError(response.status_code, response.text) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) self.installations = json.loads(response.text)
[ "def", "_get_installations", "(", "self", ")", ":", "response", "=", "None", "for", "base_url", "in", "urls", ".", "BASE_URLS", ":", "urls", ".", "BASE_URL", "=", "base_url", "try", ":", "response", "=", "requests", ".", "get", "(", "urls", ".", "get_installations", "(", "self", ".", "_username", ")", ",", "headers", "=", "{", "'Cookie'", ":", "'vid={}'", ".", "format", "(", "self", ".", "_vid", ")", ",", "'Accept'", ":", "'application/json,'", "'text/javascript, */*; q=0.01'", ",", "}", ")", "if", "2", "==", "response", ".", "status_code", "//", "100", ":", "break", "elif", "503", "==", "response", ".", "status_code", ":", "continue", "else", ":", "raise", "ResponseError", "(", "response", ".", "status_code", ",", "response", ".", "text", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "ex", ":", "raise", "RequestError", "(", "ex", ")", "_validate_response", "(", "response", ")", "self", ".", "installations", "=", "json", ".", "loads", "(", "response", ".", "text", ")" ]
Get information about installations
[ "Get", "information", "about", "installations" ]
train
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L127-L150
persandstrom/python-verisure
verisure/session.py
Session.get_overview
def get_overview(self): """ Get overview for installation """ response = None try: response = requests.get( urls.overview(self._giid), headers={ 'Accept': 'application/json, text/javascript, */*; q=0.01', 'Accept-Encoding': 'gzip, deflate', 'Content-Type': 'application/json', 'Cookie': 'vid={}'.format(self._vid)}) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
python
def get_overview(self): """ Get overview for installation """ response = None try: response = requests.get( urls.overview(self._giid), headers={ 'Accept': 'application/json, text/javascript, */*; q=0.01', 'Accept-Encoding': 'gzip, deflate', 'Content-Type': 'application/json', 'Cookie': 'vid={}'.format(self._vid)}) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
[ "def", "get_overview", "(", "self", ")", ":", "response", "=", "None", "try", ":", "response", "=", "requests", ".", "get", "(", "urls", ".", "overview", "(", "self", ".", "_giid", ")", ",", "headers", "=", "{", "'Accept'", ":", "'application/json, text/javascript, */*; q=0.01'", ",", "'Accept-Encoding'", ":", "'gzip, deflate'", ",", "'Content-Type'", ":", "'application/json'", ",", "'Cookie'", ":", "'vid={}'", ".", "format", "(", "self", ".", "_vid", ")", "}", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "ex", ":", "raise", "RequestError", "(", "ex", ")", "_validate_response", "(", "response", ")", "return", "json", ".", "loads", "(", "response", ".", "text", ")" ]
Get overview for installation
[ "Get", "overview", "for", "installation" ]
train
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L160-L174
persandstrom/python-verisure
verisure/session.py
Session.set_smartplug_state
def set_smartplug_state(self, device_label, state): """ Turn on or off smartplug Args: device_label (str): Smartplug device label state (boolean): new status, 'True' or 'False' """ response = None try: response = requests.post( urls.smartplug(self._giid), headers={ 'Content-Type': 'application/json', 'Cookie': 'vid={}'.format(self._vid)}, data=json.dumps([{ "deviceLabel": device_label, "state": state}])) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response)
python
def set_smartplug_state(self, device_label, state): """ Turn on or off smartplug Args: device_label (str): Smartplug device label state (boolean): new status, 'True' or 'False' """ response = None try: response = requests.post( urls.smartplug(self._giid), headers={ 'Content-Type': 'application/json', 'Cookie': 'vid={}'.format(self._vid)}, data=json.dumps([{ "deviceLabel": device_label, "state": state}])) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response)
[ "def", "set_smartplug_state", "(", "self", ",", "device_label", ",", "state", ")", ":", "response", "=", "None", "try", ":", "response", "=", "requests", ".", "post", "(", "urls", ".", "smartplug", "(", "self", ".", "_giid", ")", ",", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", ",", "'Cookie'", ":", "'vid={}'", ".", "format", "(", "self", ".", "_vid", ")", "}", ",", "data", "=", "json", ".", "dumps", "(", "[", "{", "\"deviceLabel\"", ":", "device_label", ",", "\"state\"", ":", "state", "}", "]", ")", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "ex", ":", "raise", "RequestError", "(", "ex", ")", "_validate_response", "(", "response", ")" ]
Turn on or off smartplug Args: device_label (str): Smartplug device label state (boolean): new status, 'True' or 'False'
[ "Turn", "on", "or", "off", "smartplug" ]
train
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L176-L195
persandstrom/python-verisure
verisure/session.py
Session.get_history
def get_history(self, filters=(), pagesize=15, offset=0): """ Get recent events Args: filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION', 'TECHNICAL', 'SOS', 'WARNING', 'LOCK', 'UNLOCK' pagesize (int): Number of events to display offset (int): Skip pagesize * offset first events """ response = None try: response = requests.get( urls.history(self._giid), headers={ 'Accept': 'application/json, text/javascript, */*; q=0.01', 'Cookie': 'vid={}'.format(self._vid)}, params={ "offset": int(offset), "pagesize": int(pagesize), "notificationCategories": filters}) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
python
def get_history(self, filters=(), pagesize=15, offset=0): """ Get recent events Args: filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION', 'TECHNICAL', 'SOS', 'WARNING', 'LOCK', 'UNLOCK' pagesize (int): Number of events to display offset (int): Skip pagesize * offset first events """ response = None try: response = requests.get( urls.history(self._giid), headers={ 'Accept': 'application/json, text/javascript, */*; q=0.01', 'Cookie': 'vid={}'.format(self._vid)}, params={ "offset": int(offset), "pagesize": int(pagesize), "notificationCategories": filters}) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
[ "def", "get_history", "(", "self", ",", "filters", "=", "(", ")", ",", "pagesize", "=", "15", ",", "offset", "=", "0", ")", ":", "response", "=", "None", "try", ":", "response", "=", "requests", ".", "get", "(", "urls", ".", "history", "(", "self", ".", "_giid", ")", ",", "headers", "=", "{", "'Accept'", ":", "'application/json, text/javascript, */*; q=0.01'", ",", "'Cookie'", ":", "'vid={}'", ".", "format", "(", "self", ".", "_vid", ")", "}", ",", "params", "=", "{", "\"offset\"", ":", "int", "(", "offset", ")", ",", "\"pagesize\"", ":", "int", "(", "pagesize", ")", ",", "\"notificationCategories\"", ":", "filters", "}", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "ex", ":", "raise", "RequestError", "(", "ex", ")", "_validate_response", "(", "response", ")", "return", "json", ".", "loads", "(", "response", ".", "text", ")" ]
Get recent events Args: filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION', 'TECHNICAL', 'SOS', 'WARNING', 'LOCK', 'UNLOCK' pagesize (int): Number of events to display offset (int): Skip pagesize * offset first events
[ "Get", "recent", "events" ]
train
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L250-L274
persandstrom/python-verisure
verisure/session.py
Session.get_climate
def get_climate(self, device_label): """ Get climate history Args: device_label: device label of climate device """ response = None try: response = requests.get( urls.climate(self._giid), headers={ 'Accept': 'application/json, text/javascript, */*; q=0.01', 'Cookie': 'vid={}'.format(self._vid)}, params={ "deviceLabel": device_label}) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
python
def get_climate(self, device_label): """ Get climate history Args: device_label: device label of climate device """ response = None try: response = requests.get( urls.climate(self._giid), headers={ 'Accept': 'application/json, text/javascript, */*; q=0.01', 'Cookie': 'vid={}'.format(self._vid)}, params={ "deviceLabel": device_label}) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
[ "def", "get_climate", "(", "self", ",", "device_label", ")", ":", "response", "=", "None", "try", ":", "response", "=", "requests", ".", "get", "(", "urls", ".", "climate", "(", "self", ".", "_giid", ")", ",", "headers", "=", "{", "'Accept'", ":", "'application/json, text/javascript, */*; q=0.01'", ",", "'Cookie'", ":", "'vid={}'", ".", "format", "(", "self", ".", "_vid", ")", "}", ",", "params", "=", "{", "\"deviceLabel\"", ":", "device_label", "}", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "ex", ":", "raise", "RequestError", "(", "ex", ")", "_validate_response", "(", "response", ")", "return", "json", ".", "loads", "(", "response", ".", "text", ")" ]
Get climate history Args: device_label: device label of climate device
[ "Get", "climate", "history", "Args", ":", "device_label", ":", "device", "label", "of", "climate", "device" ]
train
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L276-L293
persandstrom/python-verisure
verisure/session.py
Session.set_lock_state
def set_lock_state(self, code, device_label, state): """ Lock or unlock Args: code (str): Lock code device_label (str): device label of lock state (str): 'lock' or 'unlock' """ response = None try: response = requests.put( urls.set_lockstate(self._giid, device_label, state), headers={ 'Accept': 'application/json, text/javascript, */*; q=0.01', 'Content-Type': 'application/json', 'Cookie': 'vid={}'.format(self._vid)}, data=json.dumps({"code": str(code)})) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
python
def set_lock_state(self, code, device_label, state): """ Lock or unlock Args: code (str): Lock code device_label (str): device label of lock state (str): 'lock' or 'unlock' """ response = None try: response = requests.put( urls.set_lockstate(self._giid, device_label, state), headers={ 'Accept': 'application/json, text/javascript, */*; q=0.01', 'Content-Type': 'application/json', 'Cookie': 'vid={}'.format(self._vid)}, data=json.dumps({"code": str(code)})) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
[ "def", "set_lock_state", "(", "self", ",", "code", ",", "device_label", ",", "state", ")", ":", "response", "=", "None", "try", ":", "response", "=", "requests", ".", "put", "(", "urls", ".", "set_lockstate", "(", "self", ".", "_giid", ",", "device_label", ",", "state", ")", ",", "headers", "=", "{", "'Accept'", ":", "'application/json, text/javascript, */*; q=0.01'", ",", "'Content-Type'", ":", "'application/json'", ",", "'Cookie'", ":", "'vid={}'", ".", "format", "(", "self", ".", "_vid", ")", "}", ",", "data", "=", "json", ".", "dumps", "(", "{", "\"code\"", ":", "str", "(", "code", ")", "}", ")", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "ex", ":", "raise", "RequestError", "(", "ex", ")", "_validate_response", "(", "response", ")", "return", "json", ".", "loads", "(", "response", ".", "text", ")" ]
Lock or unlock Args: code (str): Lock code device_label (str): device label of lock state (str): 'lock' or 'unlock'
[ "Lock", "or", "unlock" ]
train
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L309-L329
persandstrom/python-verisure
verisure/session.py
Session.get_lock_state_transaction
def get_lock_state_transaction(self, transaction_id): """ Get lock state transaction status Args: transaction_id: Transaction ID received from set_lock_state """ response = None try: response = requests.get( urls.get_lockstate_transaction(self._giid, transaction_id), headers={ 'Accept': 'application/json, text/javascript, */*; q=0.01', 'Cookie': 'vid={}'.format(self._vid)}) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
python
def get_lock_state_transaction(self, transaction_id): """ Get lock state transaction status Args: transaction_id: Transaction ID received from set_lock_state """ response = None try: response = requests.get( urls.get_lockstate_transaction(self._giid, transaction_id), headers={ 'Accept': 'application/json, text/javascript, */*; q=0.01', 'Cookie': 'vid={}'.format(self._vid)}) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
[ "def", "get_lock_state_transaction", "(", "self", ",", "transaction_id", ")", ":", "response", "=", "None", "try", ":", "response", "=", "requests", ".", "get", "(", "urls", ".", "get_lockstate_transaction", "(", "self", ".", "_giid", ",", "transaction_id", ")", ",", "headers", "=", "{", "'Accept'", ":", "'application/json, text/javascript, */*; q=0.01'", ",", "'Cookie'", ":", "'vid={}'", ".", "format", "(", "self", ".", "_vid", ")", "}", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "ex", ":", "raise", "RequestError", "(", "ex", ")", "_validate_response", "(", "response", ")", "return", "json", ".", "loads", "(", "response", ".", "text", ")" ]
Get lock state transaction status Args: transaction_id: Transaction ID received from set_lock_state
[ "Get", "lock", "state", "transaction", "status" ]
train
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L331-L347
persandstrom/python-verisure
verisure/session.py
Session.get_lock_config
def get_lock_config(self, device_label): """ Get lock configuration Args: device_label (str): device label of lock """ response = None try: response = requests.get( urls.lockconfig(self._giid, device_label), headers={ 'Accept': 'application/json, text/javascript, */*; q=0.01', 'Cookie': 'vid={}'.format(self._vid)}) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
python
def get_lock_config(self, device_label): """ Get lock configuration Args: device_label (str): device label of lock """ response = None try: response = requests.get( urls.lockconfig(self._giid, device_label), headers={ 'Accept': 'application/json, text/javascript, */*; q=0.01', 'Cookie': 'vid={}'.format(self._vid)}) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
[ "def", "get_lock_config", "(", "self", ",", "device_label", ")", ":", "response", "=", "None", "try", ":", "response", "=", "requests", ".", "get", "(", "urls", ".", "lockconfig", "(", "self", ".", "_giid", ",", "device_label", ")", ",", "headers", "=", "{", "'Accept'", ":", "'application/json, text/javascript, */*; q=0.01'", ",", "'Cookie'", ":", "'vid={}'", ".", "format", "(", "self", ".", "_vid", ")", "}", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "ex", ":", "raise", "RequestError", "(", "ex", ")", "_validate_response", "(", "response", ")", "return", "json", ".", "loads", "(", "response", ".", "text", ")" ]
Get lock configuration Args: device_label (str): device label of lock
[ "Get", "lock", "configuration" ]
train
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L349-L365
persandstrom/python-verisure
verisure/session.py
Session.set_lock_config
def set_lock_config(self, device_label, volume=None, voice_level=None, auto_lock_enabled=None): """ Set lock configuration Args: device_label (str): device label of lock volume (str): 'SILENCE', 'LOW' or 'HIGH' voice_level (str): 'ESSENTIAL' or 'NORMAL' auto_lock_enabled (boolean): auto lock enabled """ response = None data = {} if volume: data['volume'] = volume if voice_level: data['voiceLevel'] = voice_level if auto_lock_enabled is not None: data['autoLockEnabled'] = auto_lock_enabled try: response = requests.put( urls.lockconfig(self._giid, device_label), headers={ 'Content-Type': 'application/json', 'Cookie': 'vid={}'.format(self._vid)}, data=json.dumps(data)) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response)
python
def set_lock_config(self, device_label, volume=None, voice_level=None, auto_lock_enabled=None): """ Set lock configuration Args: device_label (str): device label of lock volume (str): 'SILENCE', 'LOW' or 'HIGH' voice_level (str): 'ESSENTIAL' or 'NORMAL' auto_lock_enabled (boolean): auto lock enabled """ response = None data = {} if volume: data['volume'] = volume if voice_level: data['voiceLevel'] = voice_level if auto_lock_enabled is not None: data['autoLockEnabled'] = auto_lock_enabled try: response = requests.put( urls.lockconfig(self._giid, device_label), headers={ 'Content-Type': 'application/json', 'Cookie': 'vid={}'.format(self._vid)}, data=json.dumps(data)) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response)
[ "def", "set_lock_config", "(", "self", ",", "device_label", ",", "volume", "=", "None", ",", "voice_level", "=", "None", ",", "auto_lock_enabled", "=", "None", ")", ":", "response", "=", "None", "data", "=", "{", "}", "if", "volume", ":", "data", "[", "'volume'", "]", "=", "volume", "if", "voice_level", ":", "data", "[", "'voiceLevel'", "]", "=", "voice_level", "if", "auto_lock_enabled", "is", "not", "None", ":", "data", "[", "'autoLockEnabled'", "]", "=", "auto_lock_enabled", "try", ":", "response", "=", "requests", ".", "put", "(", "urls", ".", "lockconfig", "(", "self", ".", "_giid", ",", "device_label", ")", ",", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", ",", "'Cookie'", ":", "'vid={}'", ".", "format", "(", "self", ".", "_vid", ")", "}", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "ex", ":", "raise", "RequestError", "(", "ex", ")", "_validate_response", "(", "response", ")" ]
Set lock configuration Args: device_label (str): device label of lock volume (str): 'SILENCE', 'LOW' or 'HIGH' voice_level (str): 'ESSENTIAL' or 'NORMAL' auto_lock_enabled (boolean): auto lock enabled
[ "Set", "lock", "configuration" ]
train
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L367-L394
persandstrom/python-verisure
verisure/session.py
Session.capture_image
def capture_image(self, device_label): """ Capture smartcam image Args: device_label (str): device label of camera """ response = None try: response = requests.post( urls.imagecapture(self._giid, device_label), headers={ 'Content-Type': 'application/json', 'Cookie': 'vid={}'.format(self._vid)}) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response)
python
def capture_image(self, device_label): """ Capture smartcam image Args: device_label (str): device label of camera """ response = None try: response = requests.post( urls.imagecapture(self._giid, device_label), headers={ 'Content-Type': 'application/json', 'Cookie': 'vid={}'.format(self._vid)}) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response)
[ "def", "capture_image", "(", "self", ",", "device_label", ")", ":", "response", "=", "None", "try", ":", "response", "=", "requests", ".", "post", "(", "urls", ".", "imagecapture", "(", "self", ".", "_giid", ",", "device_label", ")", ",", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", ",", "'Cookie'", ":", "'vid={}'", ".", "format", "(", "self", ".", "_vid", ")", "}", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "ex", ":", "raise", "RequestError", "(", "ex", ")", "_validate_response", "(", "response", ")" ]
Capture smartcam image Args: device_label (str): device label of camera
[ "Capture", "smartcam", "image" ]
train
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L396-L411
persandstrom/python-verisure
verisure/session.py
Session.get_camera_imageseries
def get_camera_imageseries(self, number_of_imageseries=10, offset=0): """ Get smartcam image series Args: number_of_imageseries (int): number of image series to get offset (int): skip offset amount of image series """ response = None try: response = requests.get( urls.get_imageseries(self._giid), headers={ 'Accept': 'application/json, text/javascript, */*; q=0.01', 'Cookie': 'vid={}'.format(self._vid)}, params={ "numberOfImageSeries": int(number_of_imageseries), "offset": int(offset), "fromDate": "", "toDate": "", "onlyNotViewed": "", "_": self._giid}) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
python
def get_camera_imageseries(self, number_of_imageseries=10, offset=0): """ Get smartcam image series Args: number_of_imageseries (int): number of image series to get offset (int): skip offset amount of image series """ response = None try: response = requests.get( urls.get_imageseries(self._giid), headers={ 'Accept': 'application/json, text/javascript, */*; q=0.01', 'Cookie': 'vid={}'.format(self._vid)}, params={ "numberOfImageSeries": int(number_of_imageseries), "offset": int(offset), "fromDate": "", "toDate": "", "onlyNotViewed": "", "_": self._giid}) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
[ "def", "get_camera_imageseries", "(", "self", ",", "number_of_imageseries", "=", "10", ",", "offset", "=", "0", ")", ":", "response", "=", "None", "try", ":", "response", "=", "requests", ".", "get", "(", "urls", ".", "get_imageseries", "(", "self", ".", "_giid", ")", ",", "headers", "=", "{", "'Accept'", ":", "'application/json, text/javascript, */*; q=0.01'", ",", "'Cookie'", ":", "'vid={}'", ".", "format", "(", "self", ".", "_vid", ")", "}", ",", "params", "=", "{", "\"numberOfImageSeries\"", ":", "int", "(", "number_of_imageseries", ")", ",", "\"offset\"", ":", "int", "(", "offset", ")", ",", "\"fromDate\"", ":", "\"\"", ",", "\"toDate\"", ":", "\"\"", ",", "\"onlyNotViewed\"", ":", "\"\"", ",", "\"_\"", ":", "self", ".", "_giid", "}", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "ex", ":", "raise", "RequestError", "(", "ex", ")", "_validate_response", "(", "response", ")", "return", "json", ".", "loads", "(", "response", ".", "text", ")" ]
Get smartcam image series Args: number_of_imageseries (int): number of image series to get offset (int): skip offset amount of image series
[ "Get", "smartcam", "image", "series" ]
train
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L413-L437
persandstrom/python-verisure
verisure/session.py
Session.download_image
def download_image(self, device_label, image_id, file_name): """ Download image taken by a smartcam Args: device_label (str): device label of camera image_id (str): image id from image series file_name (str): path to file """ response = None try: response = requests.get( urls.download_image(self._giid, device_label, image_id), headers={ 'Cookie': 'vid={}'.format(self._vid)}, stream=True) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) with open(file_name, 'wb') as image_file: for chunk in response.iter_content(chunk_size=1024): if chunk: image_file.write(chunk)
python
def download_image(self, device_label, image_id, file_name): """ Download image taken by a smartcam Args: device_label (str): device label of camera image_id (str): image id from image series file_name (str): path to file """ response = None try: response = requests.get( urls.download_image(self._giid, device_label, image_id), headers={ 'Cookie': 'vid={}'.format(self._vid)}, stream=True) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) with open(file_name, 'wb') as image_file: for chunk in response.iter_content(chunk_size=1024): if chunk: image_file.write(chunk)
[ "def", "download_image", "(", "self", ",", "device_label", ",", "image_id", ",", "file_name", ")", ":", "response", "=", "None", "try", ":", "response", "=", "requests", ".", "get", "(", "urls", ".", "download_image", "(", "self", ".", "_giid", ",", "device_label", ",", "image_id", ")", ",", "headers", "=", "{", "'Cookie'", ":", "'vid={}'", ".", "format", "(", "self", ".", "_vid", ")", "}", ",", "stream", "=", "True", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "ex", ":", "raise", "RequestError", "(", "ex", ")", "_validate_response", "(", "response", ")", "with", "open", "(", "file_name", ",", "'wb'", ")", "as", "image_file", ":", "for", "chunk", "in", "response", ".", "iter_content", "(", "chunk_size", "=", "1024", ")", ":", "if", "chunk", ":", "image_file", ".", "write", "(", "chunk", ")" ]
Download image taken by a smartcam Args: device_label (str): device label of camera image_id (str): image id from image series file_name (str): path to file
[ "Download", "image", "taken", "by", "a", "smartcam" ]
train
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L439-L460
persandstrom/python-verisure
verisure/session.py
Session.logout
def logout(self): """ Logout and remove vid """ response = None try: response = requests.delete( urls.login(), headers={ 'Cookie': 'vid={}'.format(self._vid)}) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response)
python
def logout(self): """ Logout and remove vid """ response = None try: response = requests.delete( urls.login(), headers={ 'Cookie': 'vid={}'.format(self._vid)}) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response)
[ "def", "logout", "(", "self", ")", ":", "response", "=", "None", "try", ":", "response", "=", "requests", ".", "delete", "(", "urls", ".", "login", "(", ")", ",", "headers", "=", "{", "'Cookie'", ":", "'vid={}'", ".", "format", "(", "self", ".", "_vid", ")", "}", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "ex", ":", "raise", "RequestError", "(", "ex", ")", "_validate_response", "(", "response", ")" ]
Logout and remove vid
[ "Logout", "and", "remove", "vid" ]
train
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L503-L513
persandstrom/python-verisure
verisure/session.py
Session.set_heat_pump_mode
def set_heat_pump_mode(self, device_label, mode): """ Set heatpump mode Args: mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO' """ response = None try: response = requests.put( urls.set_heatpump_state(self._giid, device_label), headers={ 'Accept': 'application/json', 'Content-Type': 'application/json', 'Cookie': 'vid={}'.format(self._vid)}, data=json.dumps({'mode': mode})) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
python
def set_heat_pump_mode(self, device_label, mode): """ Set heatpump mode Args: mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO' """ response = None try: response = requests.put( urls.set_heatpump_state(self._giid, device_label), headers={ 'Accept': 'application/json', 'Content-Type': 'application/json', 'Cookie': 'vid={}'.format(self._vid)}, data=json.dumps({'mode': mode})) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
[ "def", "set_heat_pump_mode", "(", "self", ",", "device_label", ",", "mode", ")", ":", "response", "=", "None", "try", ":", "response", "=", "requests", ".", "put", "(", "urls", ".", "set_heatpump_state", "(", "self", ".", "_giid", ",", "device_label", ")", ",", "headers", "=", "{", "'Accept'", ":", "'application/json'", ",", "'Content-Type'", ":", "'application/json'", ",", "'Cookie'", ":", "'vid={}'", ".", "format", "(", "self", ".", "_vid", ")", "}", ",", "data", "=", "json", ".", "dumps", "(", "{", "'mode'", ":", "mode", "}", ")", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "ex", ":", "raise", "RequestError", "(", "ex", ")", "_validate_response", "(", "response", ")", "return", "json", ".", "loads", "(", "response", ".", "text", ")" ]
Set heatpump mode Args: mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO'
[ "Set", "heatpump", "mode", "Args", ":", "mode", "(", "str", ")", ":", "HEAT", "COOL", "FAN", "or", "AUTO" ]
train
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L529-L546
persandstrom/python-verisure
verisure/session.py
Session.set_heat_pump_feature
def set_heat_pump_feature(self, device_label, feature): """ Set heatpump mode Args: feature: 'QUIET', 'ECONAVI', or 'POWERFUL' """ response = None try: response = requests.put( urls.set_heatpump_feature(self._giid, device_label, feature), headers={ 'Accept': 'application/json', 'Content-Type': 'application/json', 'Cookie': 'vid={}'.format(self._vid)}) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
python
def set_heat_pump_feature(self, device_label, feature): """ Set heatpump mode Args: feature: 'QUIET', 'ECONAVI', or 'POWERFUL' """ response = None try: response = requests.put( urls.set_heatpump_feature(self._giid, device_label, feature), headers={ 'Accept': 'application/json', 'Content-Type': 'application/json', 'Cookie': 'vid={}'.format(self._vid)}) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
[ "def", "set_heat_pump_feature", "(", "self", ",", "device_label", ",", "feature", ")", ":", "response", "=", "None", "try", ":", "response", "=", "requests", ".", "put", "(", "urls", ".", "set_heatpump_feature", "(", "self", ".", "_giid", ",", "device_label", ",", "feature", ")", ",", "headers", "=", "{", "'Accept'", ":", "'application/json'", ",", "'Content-Type'", ":", "'application/json'", ",", "'Cookie'", ":", "'vid={}'", ".", "format", "(", "self", ".", "_vid", ")", "}", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "ex", ":", "raise", "RequestError", "(", "ex", ")", "_validate_response", "(", "response", ")", "return", "json", ".", "loads", "(", "response", ".", "text", ")" ]
Set heatpump mode Args: feature: 'QUIET', 'ECONAVI', or 'POWERFUL'
[ "Set", "heatpump", "mode", "Args", ":", "feature", ":", "QUIET", "ECONAVI", "or", "POWERFUL" ]
train
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L605-L621
persandstrom/python-verisure
verisure/__main__.py
print_result
def print_result(overview, *names): """ Print the result of a verisure request """ if names: for name in names: toprint = overview for part in name.split('/'): toprint = toprint[part] print(json.dumps(toprint, indent=4, separators=(',', ': '))) else: print(json.dumps(overview, indent=4, separators=(',', ': ')))
python
def print_result(overview, *names): """ Print the result of a verisure request """ if names: for name in names: toprint = overview for part in name.split('/'): toprint = toprint[part] print(json.dumps(toprint, indent=4, separators=(',', ': '))) else: print(json.dumps(overview, indent=4, separators=(',', ': ')))
[ "def", "print_result", "(", "overview", ",", "*", "names", ")", ":", "if", "names", ":", "for", "name", "in", "names", ":", "toprint", "=", "overview", "for", "part", "in", "name", ".", "split", "(", "'/'", ")", ":", "toprint", "=", "toprint", "[", "part", "]", "print", "(", "json", ".", "dumps", "(", "toprint", ",", "indent", "=", "4", ",", "separators", "=", "(", "','", ",", "': '", ")", ")", ")", "else", ":", "print", "(", "json", ".", "dumps", "(", "overview", ",", "indent", "=", "4", ",", "separators", "=", "(", "','", ",", "': '", ")", ")", ")" ]
Print the result of a verisure request
[ "Print", "the", "result", "of", "a", "verisure", "request" ]
train
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/__main__.py#L22-L31
persandstrom/python-verisure
verisure/__main__.py
main
def main(): """ Start verisure command line """ parser = argparse.ArgumentParser( description='Read or change status of verisure devices') parser.add_argument( 'username', help='MyPages username') parser.add_argument( 'password', help='MyPages password') parser.add_argument( '-i', '--installation', help='Installation number', type=int, default=1) parser.add_argument( '-c', '--cookie', help='File to store cookie in', default='~/.verisure-cookie') commandsparser = parser.add_subparsers( help='commands', dest='command') # installations command commandsparser.add_parser( COMMAND_INSTALLATIONS, help='Get information about installations') # overview command overview_parser = commandsparser.add_parser( COMMAND_OVERVIEW, help='Read status of one or many device types') overview_parser.add_argument( 'filter', nargs='*', help='Read status for device type') # armstate command commandsparser.add_parser( COMMAND_ARMSTATE, help='Get arm state') # Set command set_parser = commandsparser.add_parser( COMMAND_SET, help='Set status of a device') set_device = set_parser.add_subparsers( help='device', dest='device') # Set smartplug set_smartplug = set_device.add_parser( 'smartplug', help='set smartplug value') set_smartplug.add_argument( 'device_label', help='device label') set_smartplug.add_argument( 'new_value', choices=[ 'on', 'off'], help='new value') # Set alarm set_alarm = set_device.add_parser( 'alarm', help='set alarm status') set_alarm.add_argument( 'code', help='alarm code') set_alarm.add_argument( 'new_status', choices=[ 'ARMED_HOME', 'ARMED_AWAY', 'DISARMED'], help='new status') # Set lock set_lock = set_device.add_parser( 'lock', help='set lock status') set_lock.add_argument( 'code', help='alarm code') set_lock.add_argument( 'serial_number', help='serial number') set_lock.add_argument( 'new_status', choices=[ 'lock', 'unlock'], help='new status') # Get climate history history_climate = commandsparser.add_parser( COMMAND_CLIMATE, help='get climate history') 
history_climate.add_argument( 'device_label', help='device label') # Event log command eventlog_parser = commandsparser.add_parser( COMMAND_EVENTLOG, help='Get event log') eventlog_parser.add_argument( '-p', '--pagesize', type=int, default=15, help='Number of elements on one page') eventlog_parser.add_argument( '-o', '--offset', type=int, default=0, help='Page offset') eventlog_parser.add_argument( '-f', '--filter', nargs='*', default=[], choices=[ 'ARM', 'DISARM', 'FIRE', 'INTRUSION', 'TECHNICAL', 'SOS', 'WARNING', 'LOCK', 'UNLOCK'], help='Filter event log') # Capture command capture_parser = commandsparser.add_parser( COMMAND_CAPTURE, help='Capture image') capture_parser.add_argument( 'device_label', help='Device label') # Image series command commandsparser.add_parser( COMMAND_IMAGESERIES, help='Get image series') # Get image command getimage_parser = commandsparser.add_parser( COMMAND_GETIMAGE, help='Download image') getimage_parser.add_argument( 'device_label', help='Device label') getimage_parser.add_argument( 'image_id', help='image ID') getimage_parser.add_argument( 'file_name', help='Output file name') # Vacation mode command commandsparser.add_parser( COMMAND_VACATIONMODE, help='Get vacation mode info') # Door window status command commandsparser.add_parser( COMMAND_DOOR_WINDOW, help='Get door/window status') # Test ethernet command commandsparser.add_parser( COMMAND_TEST_ETHERNET, help='Update ethernet status') args = parser.parse_args() session = verisure.Session(args.username, args.password, args.cookie) session.login() try: session.set_giid(session.installations[args.installation - 1]['giid']) if args.command == COMMAND_INSTALLATIONS: print_result(session.installations) if args.command == COMMAND_OVERVIEW: print_result(session.get_overview(), *args.filter) if args.command == COMMAND_ARMSTATE: print_result(session.get_arm_state()) if args.command == COMMAND_SET: if args.device == 'smartplug': session.set_smartplug_state( args.device_label, 
args.new_value == 'on') if args.device == 'alarm': print_result(session.set_arm_state( args.code, args.new_status)) if args.device == 'lock': print_result(session.set_lock_state( args.code, args.serial_number, args.new_status)) if args.command == COMMAND_CLIMATE: print_result(session.get_climate(args.device_label)) if args.command == COMMAND_EVENTLOG: print_result( session.get_history( args.filter, pagesize=args.pagesize, offset=args.offset)) if args.command == COMMAND_CAPTURE: session.capture_image(args.device_label) if args.command == COMMAND_IMAGESERIES: print_result(session.get_camera_imageseries()) if args.command == COMMAND_GETIMAGE: session.download_image( args.device_label, args.image_id, args.file_name) if args.command == COMMAND_VACATIONMODE: print_result(session.get_vacation_mode()) if args.command == COMMAND_DOOR_WINDOW: print_result(session.get_door_window()) if args.command == COMMAND_TEST_ETHERNET: session.test_ethernet() except verisure.session.ResponseError as ex: print(ex.text)
python
def main(): """ Start verisure command line """ parser = argparse.ArgumentParser( description='Read or change status of verisure devices') parser.add_argument( 'username', help='MyPages username') parser.add_argument( 'password', help='MyPages password') parser.add_argument( '-i', '--installation', help='Installation number', type=int, default=1) parser.add_argument( '-c', '--cookie', help='File to store cookie in', default='~/.verisure-cookie') commandsparser = parser.add_subparsers( help='commands', dest='command') # installations command commandsparser.add_parser( COMMAND_INSTALLATIONS, help='Get information about installations') # overview command overview_parser = commandsparser.add_parser( COMMAND_OVERVIEW, help='Read status of one or many device types') overview_parser.add_argument( 'filter', nargs='*', help='Read status for device type') # armstate command commandsparser.add_parser( COMMAND_ARMSTATE, help='Get arm state') # Set command set_parser = commandsparser.add_parser( COMMAND_SET, help='Set status of a device') set_device = set_parser.add_subparsers( help='device', dest='device') # Set smartplug set_smartplug = set_device.add_parser( 'smartplug', help='set smartplug value') set_smartplug.add_argument( 'device_label', help='device label') set_smartplug.add_argument( 'new_value', choices=[ 'on', 'off'], help='new value') # Set alarm set_alarm = set_device.add_parser( 'alarm', help='set alarm status') set_alarm.add_argument( 'code', help='alarm code') set_alarm.add_argument( 'new_status', choices=[ 'ARMED_HOME', 'ARMED_AWAY', 'DISARMED'], help='new status') # Set lock set_lock = set_device.add_parser( 'lock', help='set lock status') set_lock.add_argument( 'code', help='alarm code') set_lock.add_argument( 'serial_number', help='serial number') set_lock.add_argument( 'new_status', choices=[ 'lock', 'unlock'], help='new status') # Get climate history history_climate = commandsparser.add_parser( COMMAND_CLIMATE, help='get climate history') 
history_climate.add_argument( 'device_label', help='device label') # Event log command eventlog_parser = commandsparser.add_parser( COMMAND_EVENTLOG, help='Get event log') eventlog_parser.add_argument( '-p', '--pagesize', type=int, default=15, help='Number of elements on one page') eventlog_parser.add_argument( '-o', '--offset', type=int, default=0, help='Page offset') eventlog_parser.add_argument( '-f', '--filter', nargs='*', default=[], choices=[ 'ARM', 'DISARM', 'FIRE', 'INTRUSION', 'TECHNICAL', 'SOS', 'WARNING', 'LOCK', 'UNLOCK'], help='Filter event log') # Capture command capture_parser = commandsparser.add_parser( COMMAND_CAPTURE, help='Capture image') capture_parser.add_argument( 'device_label', help='Device label') # Image series command commandsparser.add_parser( COMMAND_IMAGESERIES, help='Get image series') # Get image command getimage_parser = commandsparser.add_parser( COMMAND_GETIMAGE, help='Download image') getimage_parser.add_argument( 'device_label', help='Device label') getimage_parser.add_argument( 'image_id', help='image ID') getimage_parser.add_argument( 'file_name', help='Output file name') # Vacation mode command commandsparser.add_parser( COMMAND_VACATIONMODE, help='Get vacation mode info') # Door window status command commandsparser.add_parser( COMMAND_DOOR_WINDOW, help='Get door/window status') # Test ethernet command commandsparser.add_parser( COMMAND_TEST_ETHERNET, help='Update ethernet status') args = parser.parse_args() session = verisure.Session(args.username, args.password, args.cookie) session.login() try: session.set_giid(session.installations[args.installation - 1]['giid']) if args.command == COMMAND_INSTALLATIONS: print_result(session.installations) if args.command == COMMAND_OVERVIEW: print_result(session.get_overview(), *args.filter) if args.command == COMMAND_ARMSTATE: print_result(session.get_arm_state()) if args.command == COMMAND_SET: if args.device == 'smartplug': session.set_smartplug_state( args.device_label, 
args.new_value == 'on') if args.device == 'alarm': print_result(session.set_arm_state( args.code, args.new_status)) if args.device == 'lock': print_result(session.set_lock_state( args.code, args.serial_number, args.new_status)) if args.command == COMMAND_CLIMATE: print_result(session.get_climate(args.device_label)) if args.command == COMMAND_EVENTLOG: print_result( session.get_history( args.filter, pagesize=args.pagesize, offset=args.offset)) if args.command == COMMAND_CAPTURE: session.capture_image(args.device_label) if args.command == COMMAND_IMAGESERIES: print_result(session.get_camera_imageseries()) if args.command == COMMAND_GETIMAGE: session.download_image( args.device_label, args.image_id, args.file_name) if args.command == COMMAND_VACATIONMODE: print_result(session.get_vacation_mode()) if args.command == COMMAND_DOOR_WINDOW: print_result(session.get_door_window()) if args.command == COMMAND_TEST_ETHERNET: session.test_ethernet() except verisure.session.ResponseError as ex: print(ex.text)
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Read or change status of verisure devices'", ")", "parser", ".", "add_argument", "(", "'username'", ",", "help", "=", "'MyPages username'", ")", "parser", ".", "add_argument", "(", "'password'", ",", "help", "=", "'MyPages password'", ")", "parser", ".", "add_argument", "(", "'-i'", ",", "'--installation'", ",", "help", "=", "'Installation number'", ",", "type", "=", "int", ",", "default", "=", "1", ")", "parser", ".", "add_argument", "(", "'-c'", ",", "'--cookie'", ",", "help", "=", "'File to store cookie in'", ",", "default", "=", "'~/.verisure-cookie'", ")", "commandsparser", "=", "parser", ".", "add_subparsers", "(", "help", "=", "'commands'", ",", "dest", "=", "'command'", ")", "# installations command", "commandsparser", ".", "add_parser", "(", "COMMAND_INSTALLATIONS", ",", "help", "=", "'Get information about installations'", ")", "# overview command", "overview_parser", "=", "commandsparser", ".", "add_parser", "(", "COMMAND_OVERVIEW", ",", "help", "=", "'Read status of one or many device types'", ")", "overview_parser", ".", "add_argument", "(", "'filter'", ",", "nargs", "=", "'*'", ",", "help", "=", "'Read status for device type'", ")", "# armstate command", "commandsparser", ".", "add_parser", "(", "COMMAND_ARMSTATE", ",", "help", "=", "'Get arm state'", ")", "# Set command", "set_parser", "=", "commandsparser", ".", "add_parser", "(", "COMMAND_SET", ",", "help", "=", "'Set status of a device'", ")", "set_device", "=", "set_parser", ".", "add_subparsers", "(", "help", "=", "'device'", ",", "dest", "=", "'device'", ")", "# Set smartplug", "set_smartplug", "=", "set_device", ".", "add_parser", "(", "'smartplug'", ",", "help", "=", "'set smartplug value'", ")", "set_smartplug", ".", "add_argument", "(", "'device_label'", ",", "help", "=", "'device label'", ")", "set_smartplug", ".", "add_argument", "(", "'new_value'", ",", "choices", "=", "[", 
"'on'", ",", "'off'", "]", ",", "help", "=", "'new value'", ")", "# Set alarm", "set_alarm", "=", "set_device", ".", "add_parser", "(", "'alarm'", ",", "help", "=", "'set alarm status'", ")", "set_alarm", ".", "add_argument", "(", "'code'", ",", "help", "=", "'alarm code'", ")", "set_alarm", ".", "add_argument", "(", "'new_status'", ",", "choices", "=", "[", "'ARMED_HOME'", ",", "'ARMED_AWAY'", ",", "'DISARMED'", "]", ",", "help", "=", "'new status'", ")", "# Set lock", "set_lock", "=", "set_device", ".", "add_parser", "(", "'lock'", ",", "help", "=", "'set lock status'", ")", "set_lock", ".", "add_argument", "(", "'code'", ",", "help", "=", "'alarm code'", ")", "set_lock", ".", "add_argument", "(", "'serial_number'", ",", "help", "=", "'serial number'", ")", "set_lock", ".", "add_argument", "(", "'new_status'", ",", "choices", "=", "[", "'lock'", ",", "'unlock'", "]", ",", "help", "=", "'new status'", ")", "# Get climate history", "history_climate", "=", "commandsparser", ".", "add_parser", "(", "COMMAND_CLIMATE", ",", "help", "=", "'get climate history'", ")", "history_climate", ".", "add_argument", "(", "'device_label'", ",", "help", "=", "'device label'", ")", "# Event log command", "eventlog_parser", "=", "commandsparser", ".", "add_parser", "(", "COMMAND_EVENTLOG", ",", "help", "=", "'Get event log'", ")", "eventlog_parser", ".", "add_argument", "(", "'-p'", ",", "'--pagesize'", ",", "type", "=", "int", ",", "default", "=", "15", ",", "help", "=", "'Number of elements on one page'", ")", "eventlog_parser", ".", "add_argument", "(", "'-o'", ",", "'--offset'", ",", "type", "=", "int", ",", "default", "=", "0", ",", "help", "=", "'Page offset'", ")", "eventlog_parser", ".", "add_argument", "(", "'-f'", ",", "'--filter'", ",", "nargs", "=", "'*'", ",", "default", "=", "[", "]", ",", "choices", "=", "[", "'ARM'", ",", "'DISARM'", ",", "'FIRE'", ",", "'INTRUSION'", ",", "'TECHNICAL'", ",", "'SOS'", ",", "'WARNING'", ",", "'LOCK'", ",", "'UNLOCK'", "]", ",", "help", 
"=", "'Filter event log'", ")", "# Capture command", "capture_parser", "=", "commandsparser", ".", "add_parser", "(", "COMMAND_CAPTURE", ",", "help", "=", "'Capture image'", ")", "capture_parser", ".", "add_argument", "(", "'device_label'", ",", "help", "=", "'Device label'", ")", "# Image series command", "commandsparser", ".", "add_parser", "(", "COMMAND_IMAGESERIES", ",", "help", "=", "'Get image series'", ")", "# Get image command", "getimage_parser", "=", "commandsparser", ".", "add_parser", "(", "COMMAND_GETIMAGE", ",", "help", "=", "'Download image'", ")", "getimage_parser", ".", "add_argument", "(", "'device_label'", ",", "help", "=", "'Device label'", ")", "getimage_parser", ".", "add_argument", "(", "'image_id'", ",", "help", "=", "'image ID'", ")", "getimage_parser", ".", "add_argument", "(", "'file_name'", ",", "help", "=", "'Output file name'", ")", "# Vacation mode command", "commandsparser", ".", "add_parser", "(", "COMMAND_VACATIONMODE", ",", "help", "=", "'Get vacation mode info'", ")", "# Door window status command", "commandsparser", ".", "add_parser", "(", "COMMAND_DOOR_WINDOW", ",", "help", "=", "'Get door/window status'", ")", "# Test ethernet command", "commandsparser", ".", "add_parser", "(", "COMMAND_TEST_ETHERNET", ",", "help", "=", "'Update ethernet status'", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "session", "=", "verisure", ".", "Session", "(", "args", ".", "username", ",", "args", ".", "password", ",", "args", ".", "cookie", ")", "session", ".", "login", "(", ")", "try", ":", "session", ".", "set_giid", "(", "session", ".", "installations", "[", "args", ".", "installation", "-", "1", "]", "[", "'giid'", "]", ")", "if", "args", ".", "command", "==", "COMMAND_INSTALLATIONS", ":", "print_result", "(", "session", ".", "installations", ")", "if", "args", ".", "command", "==", "COMMAND_OVERVIEW", ":", "print_result", "(", "session", ".", "get_overview", "(", ")", ",", "*", "args", ".", "filter", ")", "if", "args", ".", 
"command", "==", "COMMAND_ARMSTATE", ":", "print_result", "(", "session", ".", "get_arm_state", "(", ")", ")", "if", "args", ".", "command", "==", "COMMAND_SET", ":", "if", "args", ".", "device", "==", "'smartplug'", ":", "session", ".", "set_smartplug_state", "(", "args", ".", "device_label", ",", "args", ".", "new_value", "==", "'on'", ")", "if", "args", ".", "device", "==", "'alarm'", ":", "print_result", "(", "session", ".", "set_arm_state", "(", "args", ".", "code", ",", "args", ".", "new_status", ")", ")", "if", "args", ".", "device", "==", "'lock'", ":", "print_result", "(", "session", ".", "set_lock_state", "(", "args", ".", "code", ",", "args", ".", "serial_number", ",", "args", ".", "new_status", ")", ")", "if", "args", ".", "command", "==", "COMMAND_CLIMATE", ":", "print_result", "(", "session", ".", "get_climate", "(", "args", ".", "device_label", ")", ")", "if", "args", ".", "command", "==", "COMMAND_EVENTLOG", ":", "print_result", "(", "session", ".", "get_history", "(", "args", ".", "filter", ",", "pagesize", "=", "args", ".", "pagesize", ",", "offset", "=", "args", ".", "offset", ")", ")", "if", "args", ".", "command", "==", "COMMAND_CAPTURE", ":", "session", ".", "capture_image", "(", "args", ".", "device_label", ")", "if", "args", ".", "command", "==", "COMMAND_IMAGESERIES", ":", "print_result", "(", "session", ".", "get_camera_imageseries", "(", ")", ")", "if", "args", ".", "command", "==", "COMMAND_GETIMAGE", ":", "session", ".", "download_image", "(", "args", ".", "device_label", ",", "args", ".", "image_id", ",", "args", ".", "file_name", ")", "if", "args", ".", "command", "==", "COMMAND_VACATIONMODE", ":", "print_result", "(", "session", ".", "get_vacation_mode", "(", ")", ")", "if", "args", ".", "command", "==", "COMMAND_DOOR_WINDOW", ":", "print_result", "(", "session", ".", "get_door_window", "(", ")", ")", "if", "args", ".", "command", "==", "COMMAND_TEST_ETHERNET", ":", "session", ".", "test_ethernet", "(", ")", "except", "verisure", 
".", "session", ".", "ResponseError", "as", "ex", ":", "print", "(", "ex", ".", "text", ")" ]
Start verisure command line
[ "Start", "verisure", "command", "line" ]
train
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/__main__.py#L35-L261
django-fluent/django-fluent-contents
fluent_contents/extensions/pluginbase.py
ContentPlugin.type_id
def type_id(self): """ Shortcut to retrieving the ContentType id of the model. """ try: return ContentType.objects.get_for_model(self.model, for_concrete_model=False).id except DatabaseError as e: raise DatabaseError("Unable to fetch ContentType object, is a plugin being registered before the initial syncdb? (original error: {0})".format(str(e)))
python
def type_id(self): """ Shortcut to retrieving the ContentType id of the model. """ try: return ContentType.objects.get_for_model(self.model, for_concrete_model=False).id except DatabaseError as e: raise DatabaseError("Unable to fetch ContentType object, is a plugin being registered before the initial syncdb? (original error: {0})".format(str(e)))
[ "def", "type_id", "(", "self", ")", ":", "try", ":", "return", "ContentType", ".", "objects", ".", "get_for_model", "(", "self", ".", "model", ",", "for_concrete_model", "=", "False", ")", ".", "id", "except", "DatabaseError", "as", "e", ":", "raise", "DatabaseError", "(", "\"Unable to fetch ContentType object, is a plugin being registered before the initial syncdb? (original error: {0})\"", ".", "format", "(", "str", "(", "e", ")", ")", ")" ]
Shortcut to retrieving the ContentType id of the model.
[ "Shortcut", "to", "retrieving", "the", "ContentType", "id", "of", "the", "model", "." ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/extensions/pluginbase.py#L291-L298
django-fluent/django-fluent-contents
fluent_contents/extensions/pluginbase.py
ContentPlugin.get_output_cache_key
def get_output_cache_key(self, placeholder_name, instance): """ .. versionadded:: 0.9 Return the default cache key which is used to store a rendered item. By default, this function generates the cache key using :func:`get_output_cache_base_key`. """ cachekey = self.get_output_cache_base_key(placeholder_name, instance) if self.cache_output_per_site: cachekey = "{0}-s{1}".format(cachekey, settings.SITE_ID) # Append language code if self.cache_output_per_language: # NOTE: Not using self.language_code, but using the current language instead. # That is what the {% trans %} tags are rendered as after all. # The render_placeholder() code can switch the language if needed. user_language = get_language() if user_language not in self.cache_supported_language_codes: user_language = 'unsupported' cachekey = "{0}.{1}".format(cachekey, user_language) return cachekey
python
def get_output_cache_key(self, placeholder_name, instance): """ .. versionadded:: 0.9 Return the default cache key which is used to store a rendered item. By default, this function generates the cache key using :func:`get_output_cache_base_key`. """ cachekey = self.get_output_cache_base_key(placeholder_name, instance) if self.cache_output_per_site: cachekey = "{0}-s{1}".format(cachekey, settings.SITE_ID) # Append language code if self.cache_output_per_language: # NOTE: Not using self.language_code, but using the current language instead. # That is what the {% trans %} tags are rendered as after all. # The render_placeholder() code can switch the language if needed. user_language = get_language() if user_language not in self.cache_supported_language_codes: user_language = 'unsupported' cachekey = "{0}.{1}".format(cachekey, user_language) return cachekey
[ "def", "get_output_cache_key", "(", "self", ",", "placeholder_name", ",", "instance", ")", ":", "cachekey", "=", "self", ".", "get_output_cache_base_key", "(", "placeholder_name", ",", "instance", ")", "if", "self", ".", "cache_output_per_site", ":", "cachekey", "=", "\"{0}-s{1}\"", ".", "format", "(", "cachekey", ",", "settings", ".", "SITE_ID", ")", "# Append language code", "if", "self", ".", "cache_output_per_language", ":", "# NOTE: Not using self.language_code, but using the current language instead.", "# That is what the {% trans %} tags are rendered as after all.", "# The render_placeholder() code can switch the language if needed.", "user_language", "=", "get_language", "(", ")", "if", "user_language", "not", "in", "self", ".", "cache_supported_language_codes", ":", "user_language", "=", "'unsupported'", "cachekey", "=", "\"{0}.{1}\"", ".", "format", "(", "cachekey", ",", "user_language", ")", "return", "cachekey" ]
.. versionadded:: 0.9 Return the default cache key which is used to store a rendered item. By default, this function generates the cache key using :func:`get_output_cache_base_key`.
[ "..", "versionadded", "::", "0", ".", "9", "Return", "the", "default", "cache", "key", "which", "is", "used", "to", "store", "a", "rendered", "item", ".", "By", "default", "this", "function", "generates", "the", "cache", "key", "using", ":", "func", ":", "get_output_cache_base_key", "." ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/extensions/pluginbase.py#L339-L359
django-fluent/django-fluent-contents
fluent_contents/extensions/pluginbase.py
ContentPlugin.get_output_cache_keys
def get_output_cache_keys(self, placeholder_name, instance): """ .. versionadded:: 0.9 Return the possible cache keys for a rendered item. This method should be overwritten when implementing a function :func:`set_cached_output` method or when implementing a :func:`get_output_cache_key` function. By default, this function generates the cache key using :func:`get_output_cache_base_key`. """ base_key = self.get_output_cache_base_key(placeholder_name, instance) cachekeys = [ base_key, ] if self.cache_output_per_site: site_ids = list(Site.objects.values_list('pk', flat=True)) if settings.SITE_ID not in site_ids: site_ids.append(settings.SITE_ID) base_key = get_rendering_cache_key(placeholder_name, instance) cachekeys = ["{0}-s{1}".format(base_key, site_id) for site_id in site_ids] if self.cache_output_per_language or self.render_ignore_item_language: # Append language code to all keys, # have to invalidate a lot more items in memcache. # Also added "None" suffix, since get_parent_language_code() may return that. # TODO: ideally for render_ignore_item_language, only invalidate all when the fallback language changed. total_list = [] cache_languages = list(self.cache_supported_language_codes) + ['unsupported', 'None'] # All variants of the Placeholder (for full page caching) placeholder = instance.placeholder total_list.extend(get_placeholder_cache_key(placeholder, lc) for lc in cache_languages) # All variants of the ContentItem in different languages for user_language in cache_languages: total_list.extend("{0}.{1}".format(base, user_language) for base in cachekeys) cachekeys = total_list return cachekeys
python
def get_output_cache_keys(self, placeholder_name, instance): """ .. versionadded:: 0.9 Return the possible cache keys for a rendered item. This method should be overwritten when implementing a function :func:`set_cached_output` method or when implementing a :func:`get_output_cache_key` function. By default, this function generates the cache key using :func:`get_output_cache_base_key`. """ base_key = self.get_output_cache_base_key(placeholder_name, instance) cachekeys = [ base_key, ] if self.cache_output_per_site: site_ids = list(Site.objects.values_list('pk', flat=True)) if settings.SITE_ID not in site_ids: site_ids.append(settings.SITE_ID) base_key = get_rendering_cache_key(placeholder_name, instance) cachekeys = ["{0}-s{1}".format(base_key, site_id) for site_id in site_ids] if self.cache_output_per_language or self.render_ignore_item_language: # Append language code to all keys, # have to invalidate a lot more items in memcache. # Also added "None" suffix, since get_parent_language_code() may return that. # TODO: ideally for render_ignore_item_language, only invalidate all when the fallback language changed. total_list = [] cache_languages = list(self.cache_supported_language_codes) + ['unsupported', 'None'] # All variants of the Placeholder (for full page caching) placeholder = instance.placeholder total_list.extend(get_placeholder_cache_key(placeholder, lc) for lc in cache_languages) # All variants of the ContentItem in different languages for user_language in cache_languages: total_list.extend("{0}.{1}".format(base, user_language) for base in cachekeys) cachekeys = total_list return cachekeys
[ "def", "get_output_cache_keys", "(", "self", ",", "placeholder_name", ",", "instance", ")", ":", "base_key", "=", "self", ".", "get_output_cache_base_key", "(", "placeholder_name", ",", "instance", ")", "cachekeys", "=", "[", "base_key", ",", "]", "if", "self", ".", "cache_output_per_site", ":", "site_ids", "=", "list", "(", "Site", ".", "objects", ".", "values_list", "(", "'pk'", ",", "flat", "=", "True", ")", ")", "if", "settings", ".", "SITE_ID", "not", "in", "site_ids", ":", "site_ids", ".", "append", "(", "settings", ".", "SITE_ID", ")", "base_key", "=", "get_rendering_cache_key", "(", "placeholder_name", ",", "instance", ")", "cachekeys", "=", "[", "\"{0}-s{1}\"", ".", "format", "(", "base_key", ",", "site_id", ")", "for", "site_id", "in", "site_ids", "]", "if", "self", ".", "cache_output_per_language", "or", "self", ".", "render_ignore_item_language", ":", "# Append language code to all keys,", "# have to invalidate a lot more items in memcache.", "# Also added \"None\" suffix, since get_parent_language_code() may return that.", "# TODO: ideally for render_ignore_item_language, only invalidate all when the fallback language changed.", "total_list", "=", "[", "]", "cache_languages", "=", "list", "(", "self", ".", "cache_supported_language_codes", ")", "+", "[", "'unsupported'", ",", "'None'", "]", "# All variants of the Placeholder (for full page caching)", "placeholder", "=", "instance", ".", "placeholder", "total_list", ".", "extend", "(", "get_placeholder_cache_key", "(", "placeholder", ",", "lc", ")", "for", "lc", "in", "cache_languages", ")", "# All variants of the ContentItem in different languages", "for", "user_language", "in", "cache_languages", ":", "total_list", ".", "extend", "(", "\"{0}.{1}\"", ".", "format", "(", "base", ",", "user_language", ")", "for", "base", "in", "cachekeys", ")", "cachekeys", "=", "total_list", "return", "cachekeys" ]
.. versionadded:: 0.9 Return the possible cache keys for a rendered item. This method should be overwritten when implementing a function :func:`set_cached_output` method or when implementing a :func:`get_output_cache_key` function. By default, this function generates the cache key using :func:`get_output_cache_base_key`.
[ "..", "versionadded", "::", "0", ".", "9", "Return", "the", "possible", "cache", "keys", "for", "a", "rendered", "item", "." ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/extensions/pluginbase.py#L361-L400
django-fluent/django-fluent-contents
fluent_contents/extensions/pluginbase.py
ContentPlugin.get_cached_output
def get_cached_output(self, placeholder_name, instance): """ .. versionadded:: 0.9 Return the cached output for a rendered item, or ``None`` if no output is cached. This method can be overwritten to implement custom caching mechanisms. By default, this function generates the cache key using :func:`get_output_cache_key` and retrieves the results from the configured Django cache backend (e.g. memcached). """ cachekey = self.get_output_cache_key(placeholder_name, instance) return cache.get(cachekey)
python
def get_cached_output(self, placeholder_name, instance): """ .. versionadded:: 0.9 Return the cached output for a rendered item, or ``None`` if no output is cached. This method can be overwritten to implement custom caching mechanisms. By default, this function generates the cache key using :func:`get_output_cache_key` and retrieves the results from the configured Django cache backend (e.g. memcached). """ cachekey = self.get_output_cache_key(placeholder_name, instance) return cache.get(cachekey)
[ "def", "get_cached_output", "(", "self", ",", "placeholder_name", ",", "instance", ")", ":", "cachekey", "=", "self", ".", "get_output_cache_key", "(", "placeholder_name", ",", "instance", ")", "return", "cache", ".", "get", "(", "cachekey", ")" ]
.. versionadded:: 0.9 Return the cached output for a rendered item, or ``None`` if no output is cached. This method can be overwritten to implement custom caching mechanisms. By default, this function generates the cache key using :func:`get_output_cache_key` and retrieves the results from the configured Django cache backend (e.g. memcached).
[ "..", "versionadded", "::", "0", ".", "9", "Return", "the", "cached", "output", "for", "a", "rendered", "item", "or", "None", "if", "no", "output", "is", "cached", "." ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/extensions/pluginbase.py#L402-L412
django-fluent/django-fluent-contents
fluent_contents/extensions/pluginbase.py
ContentPlugin.set_cached_output
def set_cached_output(self, placeholder_name, instance, output): """ .. versionadded:: 0.9 Store the cached output for a rendered item. This method can be overwritten to implement custom caching mechanisms. By default, this function generates the cache key using :func:`~fluent_contents.cache.get_rendering_cache_key` and stores the results in the configured Django cache backend (e.g. memcached). When custom cache keys are used, also include those in :func:`get_output_cache_keys` so the cache will be cleared when needed. .. versionchanged:: 1.0 The received data is no longer a HTML string, but :class:`~fluent_contents.models.ContentItemOutput` object. """ cachekey = self.get_output_cache_key(placeholder_name, instance) if self.cache_timeout is not DEFAULT_TIMEOUT: cache.set(cachekey, output, self.cache_timeout) else: # Don't want to mix into the default 0/None issue. cache.set(cachekey, output)
python
def set_cached_output(self, placeholder_name, instance, output): """ .. versionadded:: 0.9 Store the cached output for a rendered item. This method can be overwritten to implement custom caching mechanisms. By default, this function generates the cache key using :func:`~fluent_contents.cache.get_rendering_cache_key` and stores the results in the configured Django cache backend (e.g. memcached). When custom cache keys are used, also include those in :func:`get_output_cache_keys` so the cache will be cleared when needed. .. versionchanged:: 1.0 The received data is no longer a HTML string, but :class:`~fluent_contents.models.ContentItemOutput` object. """ cachekey = self.get_output_cache_key(placeholder_name, instance) if self.cache_timeout is not DEFAULT_TIMEOUT: cache.set(cachekey, output, self.cache_timeout) else: # Don't want to mix into the default 0/None issue. cache.set(cachekey, output)
[ "def", "set_cached_output", "(", "self", ",", "placeholder_name", ",", "instance", ",", "output", ")", ":", "cachekey", "=", "self", ".", "get_output_cache_key", "(", "placeholder_name", ",", "instance", ")", "if", "self", ".", "cache_timeout", "is", "not", "DEFAULT_TIMEOUT", ":", "cache", ".", "set", "(", "cachekey", ",", "output", ",", "self", ".", "cache_timeout", ")", "else", ":", "# Don't want to mix into the default 0/None issue.", "cache", ".", "set", "(", "cachekey", ",", "output", ")" ]
.. versionadded:: 0.9 Store the cached output for a rendered item. This method can be overwritten to implement custom caching mechanisms. By default, this function generates the cache key using :func:`~fluent_contents.cache.get_rendering_cache_key` and stores the results in the configured Django cache backend (e.g. memcached). When custom cache keys are used, also include those in :func:`get_output_cache_keys` so the cache will be cleared when needed. .. versionchanged:: 1.0 The received data is no longer a HTML string, but :class:`~fluent_contents.models.ContentItemOutput` object.
[ "..", "versionadded", "::", "0", ".", "9", "Store", "the", "cached", "output", "for", "a", "rendered", "item", "." ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/extensions/pluginbase.py#L414-L434
django-fluent/django-fluent-contents
fluent_contents/extensions/pluginbase.py
ContentPlugin.render
def render(self, request, instance, **kwargs): """ The rendering/view function that displays a plugin model instance. :param instance: An instance of the ``model`` the plugin uses. :param request: The Django :class:`~django.http.HttpRequest` class containing the request parameters. :param kwargs: An optional slot for any new parameters. To render a plugin, either override this function, or specify the :attr:`render_template` variable, and optionally override :func:`get_context`. It is recommended to wrap the output in a ``<div>`` tag, to prevent the item from being displayed right next to the previous plugin. .. versionadded:: 1.0 The function may either return a string of HTML code, or return a :class:`~fluent_contents.models.ContentItemOutput` object which holds both the CSS/JS includes and HTML string. For the sake of convenience and simplicity, most examples only return a HTML string directly. When the user needs to be redirected, simply return a :class:`~django.http.HttpResponseRedirect` or call the :func:`redirect` method. To render raw HTML code, use :func:`~django.utils.safestring.mark_safe` on the returned HTML. """ render_template = self.get_render_template(request, instance, **kwargs) if not render_template: return str(_(u"{No rendering defined for class '%s'}" % self.__class__.__name__)) context = self.get_context(request, instance, **kwargs) return self.render_to_string(request, render_template, context)
python
def render(self, request, instance, **kwargs): """ The rendering/view function that displays a plugin model instance. :param instance: An instance of the ``model`` the plugin uses. :param request: The Django :class:`~django.http.HttpRequest` class containing the request parameters. :param kwargs: An optional slot for any new parameters. To render a plugin, either override this function, or specify the :attr:`render_template` variable, and optionally override :func:`get_context`. It is recommended to wrap the output in a ``<div>`` tag, to prevent the item from being displayed right next to the previous plugin. .. versionadded:: 1.0 The function may either return a string of HTML code, or return a :class:`~fluent_contents.models.ContentItemOutput` object which holds both the CSS/JS includes and HTML string. For the sake of convenience and simplicity, most examples only return a HTML string directly. When the user needs to be redirected, simply return a :class:`~django.http.HttpResponseRedirect` or call the :func:`redirect` method. To render raw HTML code, use :func:`~django.utils.safestring.mark_safe` on the returned HTML. """ render_template = self.get_render_template(request, instance, **kwargs) if not render_template: return str(_(u"{No rendering defined for class '%s'}" % self.__class__.__name__)) context = self.get_context(request, instance, **kwargs) return self.render_to_string(request, render_template, context)
[ "def", "render", "(", "self", ",", "request", ",", "instance", ",", "*", "*", "kwargs", ")", ":", "render_template", "=", "self", ".", "get_render_template", "(", "request", ",", "instance", ",", "*", "*", "kwargs", ")", "if", "not", "render_template", ":", "return", "str", "(", "_", "(", "u\"{No rendering defined for class '%s'}\"", "%", "self", ".", "__class__", ".", "__name__", ")", ")", "context", "=", "self", ".", "get_context", "(", "request", ",", "instance", ",", "*", "*", "kwargs", ")", "return", "self", ".", "render_to_string", "(", "request", ",", "render_template", ",", "context", ")" ]
The rendering/view function that displays a plugin model instance. :param instance: An instance of the ``model`` the plugin uses. :param request: The Django :class:`~django.http.HttpRequest` class containing the request parameters. :param kwargs: An optional slot for any new parameters. To render a plugin, either override this function, or specify the :attr:`render_template` variable, and optionally override :func:`get_context`. It is recommended to wrap the output in a ``<div>`` tag, to prevent the item from being displayed right next to the previous plugin. .. versionadded:: 1.0 The function may either return a string of HTML code, or return a :class:`~fluent_contents.models.ContentItemOutput` object which holds both the CSS/JS includes and HTML string. For the sake of convenience and simplicity, most examples only return a HTML string directly. When the user needs to be redirected, simply return a :class:`~django.http.HttpResponseRedirect` or call the :func:`redirect` method. To render raw HTML code, use :func:`~django.utils.safestring.mark_safe` on the returned HTML.
[ "The", "rendering", "/", "view", "function", "that", "displays", "a", "plugin", "model", "instance", "." ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/extensions/pluginbase.py#L436-L466
django-fluent/django-fluent-contents
fluent_contents/extensions/pluginbase.py
ContentPlugin.render_to_string
def render_to_string(self, request, template, context, content_instance=None): """ Render a custom template with the :class:`~PluginContext` as context instance. """ if not content_instance: content_instance = PluginContext(request) content_instance.update(context) return render_to_string(template, content_instance.flatten(), request=request)
python
def render_to_string(self, request, template, context, content_instance=None): """ Render a custom template with the :class:`~PluginContext` as context instance. """ if not content_instance: content_instance = PluginContext(request) content_instance.update(context) return render_to_string(template, content_instance.flatten(), request=request)
[ "def", "render_to_string", "(", "self", ",", "request", ",", "template", ",", "context", ",", "content_instance", "=", "None", ")", ":", "if", "not", "content_instance", ":", "content_instance", "=", "PluginContext", "(", "request", ")", "content_instance", ".", "update", "(", "context", ")", "return", "render_to_string", "(", "template", ",", "content_instance", ".", "flatten", "(", ")", ",", "request", "=", "request", ")" ]
Render a custom template with the :class:`~PluginContext` as context instance.
[ "Render", "a", "custom", "template", "with", "the", ":", "class", ":", "~PluginContext", "as", "context", "instance", "." ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/extensions/pluginbase.py#L468-L476
django-fluent/django-fluent-contents
fluent_contents/rendering/media.py
register_frontend_media
def register_frontend_media(request, media): """ Add a :class:`~django.forms.Media` class to the current request. This will be rendered by the ``render_plugin_media`` template tag. """ if not hasattr(request, '_fluent_contents_frontend_media'): request._fluent_contents_frontend_media = Media() add_media(request._fluent_contents_frontend_media, media)
python
def register_frontend_media(request, media): """ Add a :class:`~django.forms.Media` class to the current request. This will be rendered by the ``render_plugin_media`` template tag. """ if not hasattr(request, '_fluent_contents_frontend_media'): request._fluent_contents_frontend_media = Media() add_media(request._fluent_contents_frontend_media, media)
[ "def", "register_frontend_media", "(", "request", ",", "media", ")", ":", "if", "not", "hasattr", "(", "request", ",", "'_fluent_contents_frontend_media'", ")", ":", "request", ".", "_fluent_contents_frontend_media", "=", "Media", "(", ")", "add_media", "(", "request", ".", "_fluent_contents_frontend_media", ",", "media", ")" ]
Add a :class:`~django.forms.Media` class to the current request. This will be rendered by the ``render_plugin_media`` template tag.
[ "Add", "a", ":", "class", ":", "~django", ".", "forms", ".", "Media", "class", "to", "the", "current", "request", ".", "This", "will", "be", "rendered", "by", "the", "render_plugin_media", "template", "tag", "." ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/rendering/media.py#L6-L14
django-fluent/django-fluent-contents
fluent_contents/cache.py
get_rendering_cache_key
def get_rendering_cache_key(placeholder_name, contentitem): """ Return a cache key for the content item output. .. seealso:: The :func:`ContentItem.clear_cache() <fluent_contents.models.ContentItem.clear_cache>` function can be used to remove the cache keys of a retrieved object. """ if not contentitem.pk: return None return "contentitem.@{0}.{1}.{2}".format( placeholder_name, contentitem.plugin.type_name, # always returns the upcasted name. contentitem.pk, # already unique per language_code )
python
def get_rendering_cache_key(placeholder_name, contentitem): """ Return a cache key for the content item output. .. seealso:: The :func:`ContentItem.clear_cache() <fluent_contents.models.ContentItem.clear_cache>` function can be used to remove the cache keys of a retrieved object. """ if not contentitem.pk: return None return "contentitem.@{0}.{1}.{2}".format( placeholder_name, contentitem.plugin.type_name, # always returns the upcasted name. contentitem.pk, # already unique per language_code )
[ "def", "get_rendering_cache_key", "(", "placeholder_name", ",", "contentitem", ")", ":", "if", "not", "contentitem", ".", "pk", ":", "return", "None", "return", "\"contentitem.@{0}.{1}.{2}\"", ".", "format", "(", "placeholder_name", ",", "contentitem", ".", "plugin", ".", "type_name", ",", "# always returns the upcasted name.", "contentitem", ".", "pk", ",", "# already unique per language_code", ")" ]
Return a cache key for the content item output. .. seealso:: The :func:`ContentItem.clear_cache() <fluent_contents.models.ContentItem.clear_cache>` function can be used to remove the cache keys of a retrieved object.
[ "Return", "a", "cache", "key", "for", "the", "content", "item", "output", "." ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/cache.py#L7-L22
django-fluent/django-fluent-contents
fluent_contents/cache.py
get_placeholder_cache_key
def get_placeholder_cache_key(placeholder, language_code): """ Return a cache key for an existing placeholder object. This key is used to cache the entire output of a placeholder. """ return _get_placeholder_cache_key_for_id( placeholder.parent_type_id, placeholder.parent_id, placeholder.slot, language_code )
python
def get_placeholder_cache_key(placeholder, language_code): """ Return a cache key for an existing placeholder object. This key is used to cache the entire output of a placeholder. """ return _get_placeholder_cache_key_for_id( placeholder.parent_type_id, placeholder.parent_id, placeholder.slot, language_code )
[ "def", "get_placeholder_cache_key", "(", "placeholder", ",", "language_code", ")", ":", "return", "_get_placeholder_cache_key_for_id", "(", "placeholder", ".", "parent_type_id", ",", "placeholder", ".", "parent_id", ",", "placeholder", ".", "slot", ",", "language_code", ")" ]
Return a cache key for an existing placeholder object. This key is used to cache the entire output of a placeholder.
[ "Return", "a", "cache", "key", "for", "an", "existing", "placeholder", "object", "." ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/cache.py#L25-L36
django-fluent/django-fluent-contents
fluent_contents/cache.py
get_placeholder_cache_key_for_parent
def get_placeholder_cache_key_for_parent(parent_object, placeholder_name, language_code): """ Return a cache key for a placeholder. This key is used to cache the entire output of a placeholder. """ parent_type = ContentType.objects.get_for_model(parent_object) return _get_placeholder_cache_key_for_id( parent_type.id, parent_object.pk, placeholder_name, language_code )
python
def get_placeholder_cache_key_for_parent(parent_object, placeholder_name, language_code): """ Return a cache key for a placeholder. This key is used to cache the entire output of a placeholder. """ parent_type = ContentType.objects.get_for_model(parent_object) return _get_placeholder_cache_key_for_id( parent_type.id, parent_object.pk, placeholder_name, language_code )
[ "def", "get_placeholder_cache_key_for_parent", "(", "parent_object", ",", "placeholder_name", ",", "language_code", ")", ":", "parent_type", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "parent_object", ")", "return", "_get_placeholder_cache_key_for_id", "(", "parent_type", ".", "id", ",", "parent_object", ".", "pk", ",", "placeholder_name", ",", "language_code", ")" ]
Return a cache key for a placeholder. This key is used to cache the entire output of a placeholder.
[ "Return", "a", "cache", "key", "for", "a", "placeholder", "." ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/cache.py#L39-L51
django-fluent/django-fluent-contents
fluent_contents/management/commands/remove_stale_contentitems.py
Command.remove_stale_items
def remove_stale_items(self, stale_cts): """ See if there are items that point to a removed model. """ stale_ct_ids = list(stale_cts.keys()) items = (ContentItem.objects .non_polymorphic() # very important, or polymorphic skips them on fetching derived data .filter(polymorphic_ctype__in=stale_ct_ids) .order_by('polymorphic_ctype', 'pk') ) if not items: self.stdout.write("No stale items found.") return if self.dry_run: self.stdout.write("The following content items are stale:") else: self.stdout.write("The following content items were stale:") for item in items: ct = stale_cts[item.polymorphic_ctype_id] self.stdout.write("- #{id} points to removed {app_label}.{model}".format( id=item.pk, app_label=ct.app_label, model=ct.model )) if not self.dry_run: try: item.delete() except PluginNotFound: Model.delete(item)
python
def remove_stale_items(self, stale_cts): """ See if there are items that point to a removed model. """ stale_ct_ids = list(stale_cts.keys()) items = (ContentItem.objects .non_polymorphic() # very important, or polymorphic skips them on fetching derived data .filter(polymorphic_ctype__in=stale_ct_ids) .order_by('polymorphic_ctype', 'pk') ) if not items: self.stdout.write("No stale items found.") return if self.dry_run: self.stdout.write("The following content items are stale:") else: self.stdout.write("The following content items were stale:") for item in items: ct = stale_cts[item.polymorphic_ctype_id] self.stdout.write("- #{id} points to removed {app_label}.{model}".format( id=item.pk, app_label=ct.app_label, model=ct.model )) if not self.dry_run: try: item.delete() except PluginNotFound: Model.delete(item)
[ "def", "remove_stale_items", "(", "self", ",", "stale_cts", ")", ":", "stale_ct_ids", "=", "list", "(", "stale_cts", ".", "keys", "(", ")", ")", "items", "=", "(", "ContentItem", ".", "objects", ".", "non_polymorphic", "(", ")", "# very important, or polymorphic skips them on fetching derived data", ".", "filter", "(", "polymorphic_ctype__in", "=", "stale_ct_ids", ")", ".", "order_by", "(", "'polymorphic_ctype'", ",", "'pk'", ")", ")", "if", "not", "items", ":", "self", ".", "stdout", ".", "write", "(", "\"No stale items found.\"", ")", "return", "if", "self", ".", "dry_run", ":", "self", ".", "stdout", ".", "write", "(", "\"The following content items are stale:\"", ")", "else", ":", "self", ".", "stdout", ".", "write", "(", "\"The following content items were stale:\"", ")", "for", "item", "in", "items", ":", "ct", "=", "stale_cts", "[", "item", ".", "polymorphic_ctype_id", "]", "self", ".", "stdout", ".", "write", "(", "\"- #{id} points to removed {app_label}.{model}\"", ".", "format", "(", "id", "=", "item", ".", "pk", ",", "app_label", "=", "ct", ".", "app_label", ",", "model", "=", "ct", ".", "model", ")", ")", "if", "not", "self", ".", "dry_run", ":", "try", ":", "item", ".", "delete", "(", ")", "except", "PluginNotFound", ":", "Model", ".", "delete", "(", "item", ")" ]
See if there are items that point to a removed model.
[ "See", "if", "there", "are", "items", "that", "point", "to", "a", "removed", "model", "." ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/management/commands/remove_stale_contentitems.py#L39-L68
django-fluent/django-fluent-contents
fluent_contents/management/commands/remove_stale_contentitems.py
Command.remove_unreferenced_items
def remove_unreferenced_items(self, stale_cts): """ See if there are items that no longer point to an existing parent. """ stale_ct_ids = list(stale_cts.keys()) parent_types = (ContentItem.objects.order_by() .exclude(polymorphic_ctype__in=stale_ct_ids) .values_list('parent_type', flat=True).distinct()) num_unreferenced = 0 for ct_id in parent_types: parent_ct = ContentType.objects.get_for_id(ct_id) unreferenced_items = (ContentItem.objects .filter(parent_type=ct_id) .order_by('polymorphic_ctype', 'pk')) if parent_ct.model_class() is not None: # Only select the items that are part of removed pages, # unless the parent type was removed - then removing all is correct. unreferenced_items = unreferenced_items.exclude( parent_id__in=parent_ct.get_all_objects_for_this_type() ) if unreferenced_items: for item in unreferenced_items: self.stdout.write( "- {cls}#{id} points to nonexisting {app_label}.{model}".format( cls=item.__class__.__name__, id=item.pk, app_label=parent_ct.app_label, model=parent_ct.model )) num_unreferenced += 1 if not self.dry_run and self.remove_unreferenced: item.delete() if not num_unreferenced: self.stdout.write("No unreferenced items found.") else: self.stdout.write("{0} unreferenced items found.".format(num_unreferenced)) if not self.remove_unreferenced: self.stdout.write("Re-run this command with --remove-unreferenced to remove these items")
python
def remove_unreferenced_items(self, stale_cts): """ See if there are items that no longer point to an existing parent. """ stale_ct_ids = list(stale_cts.keys()) parent_types = (ContentItem.objects.order_by() .exclude(polymorphic_ctype__in=stale_ct_ids) .values_list('parent_type', flat=True).distinct()) num_unreferenced = 0 for ct_id in parent_types: parent_ct = ContentType.objects.get_for_id(ct_id) unreferenced_items = (ContentItem.objects .filter(parent_type=ct_id) .order_by('polymorphic_ctype', 'pk')) if parent_ct.model_class() is not None: # Only select the items that are part of removed pages, # unless the parent type was removed - then removing all is correct. unreferenced_items = unreferenced_items.exclude( parent_id__in=parent_ct.get_all_objects_for_this_type() ) if unreferenced_items: for item in unreferenced_items: self.stdout.write( "- {cls}#{id} points to nonexisting {app_label}.{model}".format( cls=item.__class__.__name__, id=item.pk, app_label=parent_ct.app_label, model=parent_ct.model )) num_unreferenced += 1 if not self.dry_run and self.remove_unreferenced: item.delete() if not num_unreferenced: self.stdout.write("No unreferenced items found.") else: self.stdout.write("{0} unreferenced items found.".format(num_unreferenced)) if not self.remove_unreferenced: self.stdout.write("Re-run this command with --remove-unreferenced to remove these items")
[ "def", "remove_unreferenced_items", "(", "self", ",", "stale_cts", ")", ":", "stale_ct_ids", "=", "list", "(", "stale_cts", ".", "keys", "(", ")", ")", "parent_types", "=", "(", "ContentItem", ".", "objects", ".", "order_by", "(", ")", ".", "exclude", "(", "polymorphic_ctype__in", "=", "stale_ct_ids", ")", ".", "values_list", "(", "'parent_type'", ",", "flat", "=", "True", ")", ".", "distinct", "(", ")", ")", "num_unreferenced", "=", "0", "for", "ct_id", "in", "parent_types", ":", "parent_ct", "=", "ContentType", ".", "objects", ".", "get_for_id", "(", "ct_id", ")", "unreferenced_items", "=", "(", "ContentItem", ".", "objects", ".", "filter", "(", "parent_type", "=", "ct_id", ")", ".", "order_by", "(", "'polymorphic_ctype'", ",", "'pk'", ")", ")", "if", "parent_ct", ".", "model_class", "(", ")", "is", "not", "None", ":", "# Only select the items that are part of removed pages,", "# unless the parent type was removed - then removing all is correct.", "unreferenced_items", "=", "unreferenced_items", ".", "exclude", "(", "parent_id__in", "=", "parent_ct", ".", "get_all_objects_for_this_type", "(", ")", ")", "if", "unreferenced_items", ":", "for", "item", "in", "unreferenced_items", ":", "self", ".", "stdout", ".", "write", "(", "\"- {cls}#{id} points to nonexisting {app_label}.{model}\"", ".", "format", "(", "cls", "=", "item", ".", "__class__", ".", "__name__", ",", "id", "=", "item", ".", "pk", ",", "app_label", "=", "parent_ct", ".", "app_label", ",", "model", "=", "parent_ct", ".", "model", ")", ")", "num_unreferenced", "+=", "1", "if", "not", "self", ".", "dry_run", "and", "self", ".", "remove_unreferenced", ":", "item", ".", "delete", "(", ")", "if", "not", "num_unreferenced", ":", "self", ".", "stdout", ".", "write", "(", "\"No unreferenced items found.\"", ")", "else", ":", "self", ".", "stdout", ".", "write", "(", "\"{0} unreferenced items found.\"", ".", "format", "(", "num_unreferenced", ")", ")", "if", "not", "self", ".", "remove_unreferenced", ":", 
"self", ".", "stdout", ".", "write", "(", "\"Re-run this command with --remove-unreferenced to remove these items\"", ")" ]
See if there are items that no longer point to an existing parent.
[ "See", "if", "there", "are", "items", "that", "no", "longer", "point", "to", "an", "existing", "parent", "." ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/management/commands/remove_stale_contentitems.py#L70-L110
django-fluent/django-fluent-contents
fluent_contents/admin/genericextensions.py
BaseInitialGenericInlineFormSet.__initial_minus_queryset
def __initial_minus_queryset(self): """ Gives all elements from self._initial having a slot value that is not already in self.get_queryset() """ queryset = self.get_queryset() def initial_not_in_queryset(initial): for x in queryset: if x.slot == initial['slot']: return False return True return list(filter(initial_not_in_queryset, self._initial))
python
def __initial_minus_queryset(self): """ Gives all elements from self._initial having a slot value that is not already in self.get_queryset() """ queryset = self.get_queryset() def initial_not_in_queryset(initial): for x in queryset: if x.slot == initial['slot']: return False return True return list(filter(initial_not_in_queryset, self._initial))
[ "def", "__initial_minus_queryset", "(", "self", ")", ":", "queryset", "=", "self", ".", "get_queryset", "(", ")", "def", "initial_not_in_queryset", "(", "initial", ")", ":", "for", "x", "in", "queryset", ":", "if", "x", ".", "slot", "==", "initial", "[", "'slot'", "]", ":", "return", "False", "return", "True", "return", "list", "(", "filter", "(", "initial_not_in_queryset", ",", "self", ".", "_initial", ")", ")" ]
Gives all elements from self._initial having a slot value that is not already in self.get_queryset()
[ "Gives", "all", "elements", "from", "self", ".", "_initial", "having", "a", "slot", "value", "that", "is", "not", "already", "in", "self", ".", "get_queryset", "()" ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/admin/genericextensions.py#L60-L74
django-fluent/django-fluent-contents
fluent_contents/templatetags/fluent_contents_tags.py
_get_placeholder_arg
def _get_placeholder_arg(arg_name, placeholder): """ Validate and return the Placeholder object that the template variable points to. """ if placeholder is None: raise RuntimeWarning(u"placeholder object is None") elif isinstance(placeholder, Placeholder): return placeholder elif isinstance(placeholder, Manager): manager = placeholder try: parent_object = manager.instance # read RelatedManager code except AttributeError: parent_object = None try: placeholder = manager.all()[0] if parent_object is not None: placeholder.parent = parent_object # Fill GFK cache return placeholder except IndexError: raise RuntimeWarning(u"No placeholders found for query '{0}.all.0'".format(arg_name)) else: raise ValueError(u"The field '{0}' does not refer to a placeholder object!".format(arg_name))
python
def _get_placeholder_arg(arg_name, placeholder): """ Validate and return the Placeholder object that the template variable points to. """ if placeholder is None: raise RuntimeWarning(u"placeholder object is None") elif isinstance(placeholder, Placeholder): return placeholder elif isinstance(placeholder, Manager): manager = placeholder try: parent_object = manager.instance # read RelatedManager code except AttributeError: parent_object = None try: placeholder = manager.all()[0] if parent_object is not None: placeholder.parent = parent_object # Fill GFK cache return placeholder except IndexError: raise RuntimeWarning(u"No placeholders found for query '{0}.all.0'".format(arg_name)) else: raise ValueError(u"The field '{0}' does not refer to a placeholder object!".format(arg_name))
[ "def", "_get_placeholder_arg", "(", "arg_name", ",", "placeholder", ")", ":", "if", "placeholder", "is", "None", ":", "raise", "RuntimeWarning", "(", "u\"placeholder object is None\"", ")", "elif", "isinstance", "(", "placeholder", ",", "Placeholder", ")", ":", "return", "placeholder", "elif", "isinstance", "(", "placeholder", ",", "Manager", ")", ":", "manager", "=", "placeholder", "try", ":", "parent_object", "=", "manager", ".", "instance", "# read RelatedManager code", "except", "AttributeError", ":", "parent_object", "=", "None", "try", ":", "placeholder", "=", "manager", ".", "all", "(", ")", "[", "0", "]", "if", "parent_object", "is", "not", "None", ":", "placeholder", ".", "parent", "=", "parent_object", "# Fill GFK cache", "return", "placeholder", "except", "IndexError", ":", "raise", "RuntimeWarning", "(", "u\"No placeholders found for query '{0}.all.0'\"", ".", "format", "(", "arg_name", ")", ")", "else", ":", "raise", "ValueError", "(", "u\"The field '{0}' does not refer to a placeholder object!\"", ".", "format", "(", "arg_name", ")", ")" ]
Validate and return the Placeholder object that the template variable points to.
[ "Validate", "and", "return", "the", "Placeholder", "object", "that", "the", "template", "variable", "points", "to", "." ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/templatetags/fluent_contents_tags.py#L287-L310
django-fluent/django-fluent-contents
fluent_contents/templatetags/fluent_contents_tags.py
_split_js
def _split_js(media, domain): """ Extract the local or external URLs from a Media object. """ # Read internal property without creating new Media instance. if not media._js: return ImmutableMedia.empty_instance needs_local = domain == 'local' new_js = [] for url in media._js: if needs_local == _is_local(url): new_js.append(url) if not new_js: return ImmutableMedia.empty_instance else: return Media(js=new_js)
python
def _split_js(media, domain): """ Extract the local or external URLs from a Media object. """ # Read internal property without creating new Media instance. if not media._js: return ImmutableMedia.empty_instance needs_local = domain == 'local' new_js = [] for url in media._js: if needs_local == _is_local(url): new_js.append(url) if not new_js: return ImmutableMedia.empty_instance else: return Media(js=new_js)
[ "def", "_split_js", "(", "media", ",", "domain", ")", ":", "# Read internal property without creating new Media instance.", "if", "not", "media", ".", "_js", ":", "return", "ImmutableMedia", ".", "empty_instance", "needs_local", "=", "domain", "==", "'local'", "new_js", "=", "[", "]", "for", "url", "in", "media", ".", "_js", ":", "if", "needs_local", "==", "_is_local", "(", "url", ")", ":", "new_js", ".", "append", "(", "url", ")", "if", "not", "new_js", ":", "return", "ImmutableMedia", ".", "empty_instance", "else", ":", "return", "Media", "(", "js", "=", "new_js", ")" ]
Extract the local or external URLs from a Media object.
[ "Extract", "the", "local", "or", "external", "URLs", "from", "a", "Media", "object", "." ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/templatetags/fluent_contents_tags.py#L382-L399
django-fluent/django-fluent-contents
fluent_contents/templatetags/fluent_contents_tags.py
_split_css
def _split_css(media, domain): """ Extract the local or external URLs from a Media object. """ # Read internal property without creating new Media instance. if not media._css: return ImmutableMedia.empty_instance needs_local = domain == 'local' new_css = {} for medium, url in six.iteritems(media._css): if needs_local == _is_local(url): new_css.setdefault(medium, []).append(url) if not new_css: return ImmutableMedia.empty_instance else: return Media(css=new_css)
python
def _split_css(media, domain): """ Extract the local or external URLs from a Media object. """ # Read internal property without creating new Media instance. if not media._css: return ImmutableMedia.empty_instance needs_local = domain == 'local' new_css = {} for medium, url in six.iteritems(media._css): if needs_local == _is_local(url): new_css.setdefault(medium, []).append(url) if not new_css: return ImmutableMedia.empty_instance else: return Media(css=new_css)
[ "def", "_split_css", "(", "media", ",", "domain", ")", ":", "# Read internal property without creating new Media instance.", "if", "not", "media", ".", "_css", ":", "return", "ImmutableMedia", ".", "empty_instance", "needs_local", "=", "domain", "==", "'local'", "new_css", "=", "{", "}", "for", "medium", ",", "url", "in", "six", ".", "iteritems", "(", "media", ".", "_css", ")", ":", "if", "needs_local", "==", "_is_local", "(", "url", ")", ":", "new_css", ".", "setdefault", "(", "medium", ",", "[", "]", ")", ".", "append", "(", "url", ")", "if", "not", "new_css", ":", "return", "ImmutableMedia", ".", "empty_instance", "else", ":", "return", "Media", "(", "css", "=", "new_css", ")" ]
Extract the local or external URLs from a Media object.
[ "Extract", "the", "local", "or", "external", "URLs", "from", "a", "Media", "object", "." ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/templatetags/fluent_contents_tags.py#L402-L419
django-fluent/django-fluent-contents
fluent_contents/templatetags/fluent_contents_tags.py
PagePlaceholderNode.parse
def parse(cls, parser, token): """ Parse the node syntax: .. code-block:: html+django {% page_placeholder parentobj slotname title="test" role="m" %} """ bits, as_var = parse_as_var(parser, token) tag_name, args, kwargs = parse_token_kwargs(parser, bits, allowed_kwargs=cls.allowed_kwargs, compile_args=True, compile_kwargs=True) # Play with the arguments if len(args) == 2: parent_expr = args[0] slot_expr = args[1] elif len(args) == 1: # Allow 'page' by default. Works with most CMS'es, including django-fluent-pages. parent_expr = Variable('page') slot_expr = args[0] else: raise TemplateSyntaxError("""{0} tag allows two arguments: 'parent object' 'slot name' and optionally: title=".." role="..".""".format(tag_name)) cls.validate_args(tag_name, *args, **kwargs) return cls( tag_name=tag_name, as_var=as_var, parent_expr=parent_expr, slot_expr=slot_expr, **kwargs )
python
def parse(cls, parser, token): """ Parse the node syntax: .. code-block:: html+django {% page_placeholder parentobj slotname title="test" role="m" %} """ bits, as_var = parse_as_var(parser, token) tag_name, args, kwargs = parse_token_kwargs(parser, bits, allowed_kwargs=cls.allowed_kwargs, compile_args=True, compile_kwargs=True) # Play with the arguments if len(args) == 2: parent_expr = args[0] slot_expr = args[1] elif len(args) == 1: # Allow 'page' by default. Works with most CMS'es, including django-fluent-pages. parent_expr = Variable('page') slot_expr = args[0] else: raise TemplateSyntaxError("""{0} tag allows two arguments: 'parent object' 'slot name' and optionally: title=".." role="..".""".format(tag_name)) cls.validate_args(tag_name, *args, **kwargs) return cls( tag_name=tag_name, as_var=as_var, parent_expr=parent_expr, slot_expr=slot_expr, **kwargs )
[ "def", "parse", "(", "cls", ",", "parser", ",", "token", ")", ":", "bits", ",", "as_var", "=", "parse_as_var", "(", "parser", ",", "token", ")", "tag_name", ",", "args", ",", "kwargs", "=", "parse_token_kwargs", "(", "parser", ",", "bits", ",", "allowed_kwargs", "=", "cls", ".", "allowed_kwargs", ",", "compile_args", "=", "True", ",", "compile_kwargs", "=", "True", ")", "# Play with the arguments", "if", "len", "(", "args", ")", "==", "2", ":", "parent_expr", "=", "args", "[", "0", "]", "slot_expr", "=", "args", "[", "1", "]", "elif", "len", "(", "args", ")", "==", "1", ":", "# Allow 'page' by default. Works with most CMS'es, including django-fluent-pages.", "parent_expr", "=", "Variable", "(", "'page'", ")", "slot_expr", "=", "args", "[", "0", "]", "else", ":", "raise", "TemplateSyntaxError", "(", "\"\"\"{0} tag allows two arguments: 'parent object' 'slot name' and optionally: title=\"..\" role=\"..\".\"\"\"", ".", "format", "(", "tag_name", ")", ")", "cls", ".", "validate_args", "(", "tag_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "cls", "(", "tag_name", "=", "tag_name", ",", "as_var", "=", "as_var", ",", "parent_expr", "=", "parent_expr", ",", "slot_expr", "=", "slot_expr", ",", "*", "*", "kwargs", ")" ]
Parse the node syntax: .. code-block:: html+django {% page_placeholder parentobj slotname title="test" role="m" %}
[ "Parse", "the", "node", "syntax", ":" ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/templatetags/fluent_contents_tags.py#L114-L143
django-fluent/django-fluent-contents
fluent_contents/templatetags/fluent_contents_tags.py
PagePlaceholderNode.get_title
def get_title(self): """ Return the string literal that is used in the template. The title is used in the admin screens. """ try: return extract_literal(self.meta_kwargs['title']) except KeyError: slot = self.get_slot() if slot is not None: return slot.replace('_', ' ').title() return None
python
def get_title(self): """ Return the string literal that is used in the template. The title is used in the admin screens. """ try: return extract_literal(self.meta_kwargs['title']) except KeyError: slot = self.get_slot() if slot is not None: return slot.replace('_', ' ').title() return None
[ "def", "get_title", "(", "self", ")", ":", "try", ":", "return", "extract_literal", "(", "self", ".", "meta_kwargs", "[", "'title'", "]", ")", "except", "KeyError", ":", "slot", "=", "self", ".", "get_slot", "(", ")", "if", "slot", "is", "not", "None", ":", "return", "slot", ".", "replace", "(", "'_'", ",", "' '", ")", ".", "title", "(", ")", "return", "None" ]
Return the string literal that is used in the template. The title is used in the admin screens.
[ "Return", "the", "string", "literal", "that", "is", "used", "in", "the", "template", ".", "The", "title", "is", "used", "in", "the", "admin", "screens", "." ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/templatetags/fluent_contents_tags.py#L152-L164
django-fluent/django-fluent-contents
fluent_contents/utils/templatetags.py
extract_literal
def extract_literal(templatevar): """ See if a template FilterExpression holds a literal value. :type templatevar: django.template.FilterExpression :rtype: bool|None """ # FilterExpression contains another 'var' that either contains a Variable or SafeData object. if hasattr(templatevar, 'var'): templatevar = templatevar.var if isinstance(templatevar, SafeData): # Literal in FilterExpression, can return. return templatevar else: # Variable in FilterExpression, not going to work here. return None if templatevar[0] in ('"', "'") and templatevar[-1] in ('"', "'"): return templatevar[1:-1] else: return None
python
def extract_literal(templatevar): """ See if a template FilterExpression holds a literal value. :type templatevar: django.template.FilterExpression :rtype: bool|None """ # FilterExpression contains another 'var' that either contains a Variable or SafeData object. if hasattr(templatevar, 'var'): templatevar = templatevar.var if isinstance(templatevar, SafeData): # Literal in FilterExpression, can return. return templatevar else: # Variable in FilterExpression, not going to work here. return None if templatevar[0] in ('"', "'") and templatevar[-1] in ('"', "'"): return templatevar[1:-1] else: return None
[ "def", "extract_literal", "(", "templatevar", ")", ":", "# FilterExpression contains another 'var' that either contains a Variable or SafeData object.", "if", "hasattr", "(", "templatevar", ",", "'var'", ")", ":", "templatevar", "=", "templatevar", ".", "var", "if", "isinstance", "(", "templatevar", ",", "SafeData", ")", ":", "# Literal in FilterExpression, can return.", "return", "templatevar", "else", ":", "# Variable in FilterExpression, not going to work here.", "return", "None", "if", "templatevar", "[", "0", "]", "in", "(", "'\"'", ",", "\"'\"", ")", "and", "templatevar", "[", "-", "1", "]", "in", "(", "'\"'", ",", "\"'\"", ")", ":", "return", "templatevar", "[", "1", ":", "-", "1", "]", "else", ":", "return", "None" ]
See if a template FilterExpression holds a literal value. :type templatevar: django.template.FilterExpression :rtype: bool|None
[ "See", "if", "a", "template", "FilterExpression", "holds", "a", "literal", "value", "." ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/utils/templatetags.py#L11-L31
django-fluent/django-fluent-contents
fluent_contents/utils/templatetags.py
extract_literal_bool
def extract_literal_bool(templatevar): """ See if a template FilterExpression holds a literal boolean value. :type templatevar: django.template.FilterExpression :rtype: bool|None """ # FilterExpression contains another 'var' that either contains a Variable or SafeData object. if hasattr(templatevar, 'var'): templatevar = templatevar.var if isinstance(templatevar, SafeData): # Literal in FilterExpression, can return. return is_true(templatevar) else: # Variable in FilterExpression, not going to work here. return None return is_true(templatevar)
python
def extract_literal_bool(templatevar): """ See if a template FilterExpression holds a literal boolean value. :type templatevar: django.template.FilterExpression :rtype: bool|None """ # FilterExpression contains another 'var' that either contains a Variable or SafeData object. if hasattr(templatevar, 'var'): templatevar = templatevar.var if isinstance(templatevar, SafeData): # Literal in FilterExpression, can return. return is_true(templatevar) else: # Variable in FilterExpression, not going to work here. return None return is_true(templatevar)
[ "def", "extract_literal_bool", "(", "templatevar", ")", ":", "# FilterExpression contains another 'var' that either contains a Variable or SafeData object.", "if", "hasattr", "(", "templatevar", ",", "'var'", ")", ":", "templatevar", "=", "templatevar", ".", "var", "if", "isinstance", "(", "templatevar", ",", "SafeData", ")", ":", "# Literal in FilterExpression, can return.", "return", "is_true", "(", "templatevar", ")", "else", ":", "# Variable in FilterExpression, not going to work here.", "return", "None", "return", "is_true", "(", "templatevar", ")" ]
See if a template FilterExpression holds a literal boolean value. :type templatevar: django.template.FilterExpression :rtype: bool|None
[ "See", "if", "a", "template", "FilterExpression", "holds", "a", "literal", "boolean", "value", "." ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/utils/templatetags.py#L34-L51
django-fluent/django-fluent-contents
fluent_contents/plugins/markup/content_plugins.py
_create_markup_plugin
def _create_markup_plugin(language, model): """ Create a new MarkupPlugin class that represents the plugin type. """ form = type("{0}MarkupItemForm".format(language.capitalize()), (MarkupItemForm,), { 'default_language': language, }) classname = "{0}MarkupPlugin".format(language.capitalize()) PluginClass = type(classname, (MarkupPluginBase,), { 'model': model, 'form': form, }) return PluginClass
python
def _create_markup_plugin(language, model): """ Create a new MarkupPlugin class that represents the plugin type. """ form = type("{0}MarkupItemForm".format(language.capitalize()), (MarkupItemForm,), { 'default_language': language, }) classname = "{0}MarkupPlugin".format(language.capitalize()) PluginClass = type(classname, (MarkupPluginBase,), { 'model': model, 'form': form, }) return PluginClass
[ "def", "_create_markup_plugin", "(", "language", ",", "model", ")", ":", "form", "=", "type", "(", "\"{0}MarkupItemForm\"", ".", "format", "(", "language", ".", "capitalize", "(", ")", ")", ",", "(", "MarkupItemForm", ",", ")", ",", "{", "'default_language'", ":", "language", ",", "}", ")", "classname", "=", "\"{0}MarkupPlugin\"", ".", "format", "(", "language", ".", "capitalize", "(", ")", ")", "PluginClass", "=", "type", "(", "classname", ",", "(", "MarkupPluginBase", ",", ")", ",", "{", "'model'", ":", "model", ",", "'form'", ":", "form", ",", "}", ")", "return", "PluginClass" ]
Create a new MarkupPlugin class that represents the plugin type.
[ "Create", "a", "new", "MarkupPlugin", "class", "that", "represents", "the", "plugin", "type", "." ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/plugins/markup/content_plugins.py#L43-L57
django-fluent/django-fluent-contents
fluent_contents/models/managers.py
get_parent_lookup_kwargs
def get_parent_lookup_kwargs(parent_object): """ Return lookup arguments for the generic ``parent_type`` / ``parent_id`` fields. :param parent_object: The parent object. :type parent_object: :class:`~django.db.models.Model` """ if parent_object is None: return dict( parent_type__isnull=True, parent_id=0 ) elif isinstance(parent_object, models.Model): return dict( parent_type=ContentType.objects.get_for_model(parent_object), parent_id=parent_object.pk ) else: raise ValueError("parent_object is not a model!")
python
def get_parent_lookup_kwargs(parent_object): """ Return lookup arguments for the generic ``parent_type`` / ``parent_id`` fields. :param parent_object: The parent object. :type parent_object: :class:`~django.db.models.Model` """ if parent_object is None: return dict( parent_type__isnull=True, parent_id=0 ) elif isinstance(parent_object, models.Model): return dict( parent_type=ContentType.objects.get_for_model(parent_object), parent_id=parent_object.pk ) else: raise ValueError("parent_object is not a model!")
[ "def", "get_parent_lookup_kwargs", "(", "parent_object", ")", ":", "if", "parent_object", "is", "None", ":", "return", "dict", "(", "parent_type__isnull", "=", "True", ",", "parent_id", "=", "0", ")", "elif", "isinstance", "(", "parent_object", ",", "models", ".", "Model", ")", ":", "return", "dict", "(", "parent_type", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "parent_object", ")", ",", "parent_id", "=", "parent_object", ".", "pk", ")", "else", ":", "raise", "ValueError", "(", "\"parent_object is not a model!\"", ")" ]
Return lookup arguments for the generic ``parent_type`` / ``parent_id`` fields. :param parent_object: The parent object. :type parent_object: :class:`~django.db.models.Model`
[ "Return", "lookup", "arguments", "for", "the", "generic", "parent_type", "/", "parent_id", "fields", "." ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/models/managers.py#L201-L219
django-fluent/django-fluent-contents
fluent_contents/models/managers.py
get_parent_active_language_choices
def get_parent_active_language_choices(parent_object, exclude_current=False): """ .. versionadded:: 1.0 Get the currently active languages of an parent object. Note: if there is no content at the page, the language won't be returned. """ assert parent_object is not None, "Missing parent_object!" from .db import ContentItem qs = ContentItem.objects \ .parent(parent_object, limit_parent_language=False) \ .values_list('language_code', flat=True).distinct() languages = set(qs) if exclude_current: parent_lang = get_parent_language_code(parent_object) languages.discard(parent_lang) if parler_appsettings.PARLER_LANGUAGES and not parler_appsettings.PARLER_SHOW_EXCLUDED_LANGUAGE_TABS: site_id = get_parent_site_id(parent_object) try: lang_dict = parler_appsettings.PARLER_LANGUAGES[site_id] except KeyError: lang_dict = () allowed_languages = set(item['code'] for item in lang_dict) languages &= allowed_languages # No multithreading issue here, object is instantiated for this user only. choices = [(lang, str(get_language_title(lang))) for lang in languages if lang] choices.sort(key=lambda tup: tup[1]) return choices
python
def get_parent_active_language_choices(parent_object, exclude_current=False): """ .. versionadded:: 1.0 Get the currently active languages of an parent object. Note: if there is no content at the page, the language won't be returned. """ assert parent_object is not None, "Missing parent_object!" from .db import ContentItem qs = ContentItem.objects \ .parent(parent_object, limit_parent_language=False) \ .values_list('language_code', flat=True).distinct() languages = set(qs) if exclude_current: parent_lang = get_parent_language_code(parent_object) languages.discard(parent_lang) if parler_appsettings.PARLER_LANGUAGES and not parler_appsettings.PARLER_SHOW_EXCLUDED_LANGUAGE_TABS: site_id = get_parent_site_id(parent_object) try: lang_dict = parler_appsettings.PARLER_LANGUAGES[site_id] except KeyError: lang_dict = () allowed_languages = set(item['code'] for item in lang_dict) languages &= allowed_languages # No multithreading issue here, object is instantiated for this user only. choices = [(lang, str(get_language_title(lang))) for lang in languages if lang] choices.sort(key=lambda tup: tup[1]) return choices
[ "def", "get_parent_active_language_choices", "(", "parent_object", ",", "exclude_current", "=", "False", ")", ":", "assert", "parent_object", "is", "not", "None", ",", "\"Missing parent_object!\"", "from", ".", "db", "import", "ContentItem", "qs", "=", "ContentItem", ".", "objects", ".", "parent", "(", "parent_object", ",", "limit_parent_language", "=", "False", ")", ".", "values_list", "(", "'language_code'", ",", "flat", "=", "True", ")", ".", "distinct", "(", ")", "languages", "=", "set", "(", "qs", ")", "if", "exclude_current", ":", "parent_lang", "=", "get_parent_language_code", "(", "parent_object", ")", "languages", ".", "discard", "(", "parent_lang", ")", "if", "parler_appsettings", ".", "PARLER_LANGUAGES", "and", "not", "parler_appsettings", ".", "PARLER_SHOW_EXCLUDED_LANGUAGE_TABS", ":", "site_id", "=", "get_parent_site_id", "(", "parent_object", ")", "try", ":", "lang_dict", "=", "parler_appsettings", ".", "PARLER_LANGUAGES", "[", "site_id", "]", "except", "KeyError", ":", "lang_dict", "=", "(", ")", "allowed_languages", "=", "set", "(", "item", "[", "'code'", "]", "for", "item", "in", "lang_dict", ")", "languages", "&=", "allowed_languages", "# No multithreading issue here, object is instantiated for this user only.", "choices", "=", "[", "(", "lang", ",", "str", "(", "get_language_title", "(", "lang", ")", ")", ")", "for", "lang", "in", "languages", "if", "lang", "]", "choices", ".", "sort", "(", "key", "=", "lambda", "tup", ":", "tup", "[", "1", "]", ")", "return", "choices" ]
.. versionadded:: 1.0 Get the currently active languages of an parent object. Note: if there is no content at the page, the language won't be returned.
[ "..", "versionadded", "::", "1", ".", "0" ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/models/managers.py#L248-L282
django-fluent/django-fluent-contents
fluent_contents/models/managers.py
PlaceholderManager.get_by_slot
def get_by_slot(self, parent_object, slot): """ Return a placeholder by key. """ placeholder = self.parent(parent_object).get(slot=slot) placeholder.parent = parent_object # fill the reverse cache return placeholder
python
def get_by_slot(self, parent_object, slot): """ Return a placeholder by key. """ placeholder = self.parent(parent_object).get(slot=slot) placeholder.parent = parent_object # fill the reverse cache return placeholder
[ "def", "get_by_slot", "(", "self", ",", "parent_object", ",", "slot", ")", ":", "placeholder", "=", "self", ".", "parent", "(", "parent_object", ")", ".", "get", "(", "slot", "=", "slot", ")", "placeholder", ".", "parent", "=", "parent_object", "# fill the reverse cache", "return", "placeholder" ]
Return a placeholder by key.
[ "Return", "a", "placeholder", "by", "key", "." ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/models/managers.py#L28-L34
django-fluent/django-fluent-contents
fluent_contents/models/managers.py
PlaceholderManager.create_for_object
def create_for_object(self, parent_object, slot, role='m', title=None): """ Create a placeholder with the given parameters """ from .db import Placeholder parent_attrs = get_parent_lookup_kwargs(parent_object) obj = self.create( slot=slot, role=role or Placeholder.MAIN, title=title or slot.title().replace('_', ' '), **parent_attrs ) obj.parent = parent_object # fill the reverse cache return obj
python
def create_for_object(self, parent_object, slot, role='m', title=None): """ Create a placeholder with the given parameters """ from .db import Placeholder parent_attrs = get_parent_lookup_kwargs(parent_object) obj = self.create( slot=slot, role=role or Placeholder.MAIN, title=title or slot.title().replace('_', ' '), **parent_attrs ) obj.parent = parent_object # fill the reverse cache return obj
[ "def", "create_for_object", "(", "self", ",", "parent_object", ",", "slot", ",", "role", "=", "'m'", ",", "title", "=", "None", ")", ":", "from", ".", "db", "import", "Placeholder", "parent_attrs", "=", "get_parent_lookup_kwargs", "(", "parent_object", ")", "obj", "=", "self", ".", "create", "(", "slot", "=", "slot", ",", "role", "=", "role", "or", "Placeholder", ".", "MAIN", ",", "title", "=", "title", "or", "slot", ".", "title", "(", ")", ".", "replace", "(", "'_'", ",", "' '", ")", ",", "*", "*", "parent_attrs", ")", "obj", ".", "parent", "=", "parent_object", "# fill the reverse cache", "return", "obj" ]
Create a placeholder with the given parameters
[ "Create", "a", "placeholder", "with", "the", "given", "parameters" ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/models/managers.py#L36-L49
django-fluent/django-fluent-contents
fluent_contents/models/managers.py
ContentItemQuerySet.translated
def translated(self, *language_codes): """ .. versionadded:: 1.0 Only return translated objects which of the given languages. When no language codes are given, only the currently active language is returned. """ # this API has the same semantics as django-parler's .translated() for familiarity. # However, since this package doesn't filter in a related field, the ORM limitations don't apply. if not language_codes: language_codes = (get_language(),) else: # Since some code operates on a True/str switch, make sure that doesn't drip into this low level code. for language_code in language_codes: if not isinstance(language_code, six.string_types) or language_code.lower() in ('1', '0', 'true', 'false'): raise ValueError("ContentItemQuerySet.translated() expected language_code to be an ISO code") if len(language_codes) == 1: return self.filter(language_code=language_codes[0]) else: return self.filter(language_code__in=language_codes)
python
def translated(self, *language_codes): """ .. versionadded:: 1.0 Only return translated objects which of the given languages. When no language codes are given, only the currently active language is returned. """ # this API has the same semantics as django-parler's .translated() for familiarity. # However, since this package doesn't filter in a related field, the ORM limitations don't apply. if not language_codes: language_codes = (get_language(),) else: # Since some code operates on a True/str switch, make sure that doesn't drip into this low level code. for language_code in language_codes: if not isinstance(language_code, six.string_types) or language_code.lower() in ('1', '0', 'true', 'false'): raise ValueError("ContentItemQuerySet.translated() expected language_code to be an ISO code") if len(language_codes) == 1: return self.filter(language_code=language_codes[0]) else: return self.filter(language_code__in=language_codes)
[ "def", "translated", "(", "self", ",", "*", "language_codes", ")", ":", "# this API has the same semantics as django-parler's .translated() for familiarity.", "# However, since this package doesn't filter in a related field, the ORM limitations don't apply.", "if", "not", "language_codes", ":", "language_codes", "=", "(", "get_language", "(", ")", ",", ")", "else", ":", "# Since some code operates on a True/str switch, make sure that doesn't drip into this low level code.", "for", "language_code", "in", "language_codes", ":", "if", "not", "isinstance", "(", "language_code", ",", "six", ".", "string_types", ")", "or", "language_code", ".", "lower", "(", ")", "in", "(", "'1'", ",", "'0'", ",", "'true'", ",", "'false'", ")", ":", "raise", "ValueError", "(", "\"ContentItemQuerySet.translated() expected language_code to be an ISO code\"", ")", "if", "len", "(", "language_codes", ")", "==", "1", ":", "return", "self", ".", "filter", "(", "language_code", "=", "language_codes", "[", "0", "]", ")", "else", ":", "return", "self", ".", "filter", "(", "language_code__in", "=", "language_codes", ")" ]
.. versionadded:: 1.0 Only return translated objects which of the given languages. When no language codes are given, only the currently active language is returned.
[ "..", "versionadded", "::", "1", ".", "0" ]
train
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/models/managers.py#L57-L78