Dataset schema (column name, type, value-length range):

repository_name              string      7 – 55 chars
func_path_in_repository      string      4 – 223 chars
func_name                    string      1 – 134 chars
whole_func_string            string      75 – 104k chars
language                     string      1 distinct value ("python")
func_code_string             string      75 – 104k chars (identical to whole_func_string)
func_code_tokens             sequence    19 – 28.4k tokens
func_documentation_string    string      1 – 46.9k chars
func_documentation_tokens    sequence    1 – 1.97k tokens
split_name                   string      1 distinct value ("train")
func_code_url                string      87 – 315 chars
koordinates/python-client
koordinates/sets.py
SetManager.set_metadata
def set_metadata(self, set_id, fp):
    """
    Set the XML metadata on a set.

    :param file fp: file-like object to read the XML metadata from.
    """
    base_url = self.client.get_url('SET', 'GET', 'single', {'id': set_id})
    self._metadata.set(base_url, fp)
python
[ "def", "set_metadata", "(", "self", ",", "set_id", ",", "fp", ")", ":", "base_url", "=", "self", ".", "client", ".", "get_url", "(", "'SET'", ",", "'GET'", ",", "'single'", ",", "{", "'id'", ":", "set_id", "}", ")", "self", ".", "_metadata", ".", "set", "(", "base_url", ",", "fp", ")" ]
Set the XML metadata on a set. :param file fp: file-like object to read the XML metadata from.
[ "Set", "the", "XML", "metadata", "on", "a", "set", "." ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/sets.py#L45-L52
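A minimal usage sketch for SetManager.set_metadata. The Client constructor arguments and the client.sets attribute are assumptions for illustration, not taken from the record above.

import koordinates

# Hypothetical host and token; substitute real credentials.
client = koordinates.Client(host='koordinates.com', token='MY_API_TOKEN')

# Attach XML metadata read from a local file to the set with id 123 (made up).
with open('set-metadata.xml', 'rb') as fp:
    client.sets.set_metadata(123, fp)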
koordinates/python-client
koordinates/sets.py
Set.set_metadata
def set_metadata(self, fp):
    """
    Set the XML metadata on a set.

    :param file fp: file-like object to read the XML metadata from.
    """
    base_url = self._client.get_url('SET', 'GET', 'single', {'id': self.id})
    self._manager._metadata.set(base_url, fp)

    # reload myself
    r = self._client.request('GET', base_url)
    return self._deserialize(r.json(), self._manager)
python
[ "def", "set_metadata", "(", "self", ",", "fp", ")", ":", "base_url", "=", "self", ".", "_client", ".", "get_url", "(", "'SET'", ",", "'GET'", ",", "'single'", ",", "{", "'id'", ":", "self", ".", "id", "}", ")", "self", ".", "_manager", ".", "_metadata", ".", "set", "(", "base_url", ",", "fp", ")", "# reload myself", "r", "=", "self", ".", "_client", ".", "request", "(", "'GET'", ",", "base_url", ")", "return", "self", ".", "_deserialize", "(", "r", ".", "json", "(", ")", ",", "self", ".", "_manager", ")" ]
Set the XML metadata on a set. :param file fp: file-like object to read the XML metadata from.
[ "Set", "the", "XML", "metadata", "on", "a", "set", "." ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/sets.py#L80-L91
koordinates/python-client
koordinates/metadata.py
MetadataManager.set
def set(self, parent_url, fp):
    """
    If the parent object already has XML metadata, it will be overwritten.

    Accepts XML metadata in any of the three supported formats.
    The format will be detected from the XML content.

    The Metadata object becomes invalid after setting.

    :param file fp: A reference to an open file-like object
        which the content will be read from.
    """
    url = parent_url + self.client.get_url_path('METADATA', 'POST', 'set', {})
    r = self.client.request('POST', url, data=fp, headers={'Content-Type': 'text/xml'})
    if r.status_code not in [200, 201]:
        raise exceptions.ServerError("Expected success response, got %s: %s" % (r.status_code, url))
python
[ "def", "set", "(", "self", ",", "parent_url", ",", "fp", ")", ":", "url", "=", "parent_url", "+", "self", ".", "client", ".", "get_url_path", "(", "'METADATA'", ",", "'POST'", ",", "'set'", ",", "{", "}", ")", "r", "=", "self", ".", "client", ".", "request", "(", "'POST'", ",", "url", ",", "data", "=", "fp", ",", "headers", "=", "{", "'Content-Type'", ":", "'text/xml'", "}", ")", "if", "r", ".", "status_code", "not", "in", "[", "200", ",", "201", "]", ":", "raise", "exceptions", ".", "ServerError", "(", "\"Expected success response, got %s: %s\"", "%", "(", "r", ".", "status_code", ",", "url", ")", ")" ]
If the parent object already has XML metadata, it will be overwritten. Accepts XML metadata in any of the three supported formats. The format will be detected from the XML content. The Metadata object becomes invalid after setting :param file fp: A reference to an open file-like object which the content will be read from.
[ "If", "the", "parent", "object", "already", "has", "XML", "metadata", "it", "will", "be", "overwritten", "." ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/metadata.py#L21-L35
koordinates/python-client
koordinates/metadata.py
Metadata.get_xml
def get_xml(self, fp, format=FORMAT_NATIVE):
    """
    Returns the XML metadata for this source, converted to the requested format.
    Converted metadata may not contain all the same information as the native format.

    :param file fp: A path, or an open file-like object which the content
        should be written to.
    :param str format: desired format for the output. This should be one of the
        available formats from :py:meth:`.get_formats`, or :py:attr:`.FORMAT_NATIVE`
        for the native format.

    If you pass this function an open file-like object as the fp parameter,
    the function will not close that file for you.
    """
    r = self._client.request('GET', getattr(self, format), stream=True)
    filename = stream.stream_response_to_file(r, path=fp)
    return filename
python
[ "def", "get_xml", "(", "self", ",", "fp", ",", "format", "=", "FORMAT_NATIVE", ")", ":", "r", "=", "self", ".", "_client", ".", "request", "(", "'GET'", ",", "getattr", "(", "self", ",", "format", ")", ",", "stream", "=", "True", ")", "filename", "=", "stream", ".", "stream_response_to_file", "(", "r", ",", "path", "=", "fp", ")", "return", "filename" ]
Returns the XML metadata for this source, converted to the requested format. Converted metadata may not contain all the same information as the native format. :param file fp: A path, or an open file-like object which the content should be written to. :param str format: desired format for the output. This should be one of the available formats from :py:meth:`.get_formats`, or :py:attr:`.FORMAT_NATIVE` for the native format. If you pass this function an open file-like object as the fp parameter, the function will not close that file for you.
[ "Returns", "the", "XML", "metadata", "for", "this", "source", "converted", "to", "the", "requested", "format", ".", "Converted", "metadata", "may", "not", "contain", "all", "the", "same", "information", "as", "the", "native", "format", "." ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/metadata.py#L47-L61
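A sketch of exporting metadata with Metadata.get_xml, assuming a hypothetical layer object (fetched via the illustrative client from the earlier sketch) that exposes a metadata attribute. Note the docstring's caveat: an open file-like object passed as fp is not closed by the function.

# fp may be a path; the filename written is returned.
layer = client.layers.get(123)                     # hypothetical layer id
path = layer.metadata.get_xml('metadata.iso.xml')
print('written to', path)

# ...or an open file-like object, which the caller must close itself:
with open('metadata.iso.xml', 'wb') as f:
    layer.metadata.get_xml(f)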
koordinates/python-client
koordinates/metadata.py
Metadata.get_formats
def get_formats(self):
    """ Return the available format names for this metadata """
    formats = []
    for key in (self.FORMAT_DC, self.FORMAT_FGDC, self.FORMAT_ISO):
        if hasattr(self, key):
            formats.append(key)
    return formats
python
[ "def", "get_formats", "(", "self", ")", ":", "formats", "=", "[", "]", "for", "key", "in", "(", "self", ".", "FORMAT_DC", ",", "self", ".", "FORMAT_FGDC", ",", "self", ".", "FORMAT_ISO", ")", ":", "if", "hasattr", "(", "self", ",", "key", ")", ":", "formats", ".", "append", "(", "key", ")", "return", "formats" ]
Return the available format names for this metadata
[ "Return", "the", "available", "format", "names", "for", "this", "metadata" ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/metadata.py#L63-L69
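Combining get_formats with get_xml gives a way to export every available representation; this sketch reuses the hypothetical layer object from the earlier sketch.

# Export the metadata in each available format.
for fmt in layer.metadata.get_formats():
    layer.metadata.get_xml('metadata.{}.xml'.format(fmt), format=fmt)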
koordinates/python-client
koordinates/utils.py
is_bound
def is_bound(method):
    """
    Decorator that asserts the model instance is bound.

    Requires:
    1. an ``id`` attribute
    2. a ``url`` attribute
    3. a manager set
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        if not self._is_bound:
            raise ValueError("%r must be bound to call %s()" % (self, method.__name__))
        return method(self, *args, **kwargs)
    return wrapper
python
[ "def", "is_bound", "(", "method", ")", ":", "@", "functools", ".", "wraps", "(", "method", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "_is_bound", ":", "raise", "ValueError", "(", "\"%r must be bound to call %s()\"", "%", "(", "self", ",", "method", ".", "__name__", ")", ")", "return", "method", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
Decorator that asserts the model instance is bound. Requires: 1. an ``id`` attribute 2. a ``url`` attribute 3. a manager set
[ "Decorator", "that", "asserts", "the", "model", "instance", "is", "bound", "." ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/utils.py#L16-L30
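A toy model showing how the decorator is applied, assuming the is_bound decorator from the record above is in scope; the Model class and its _is_bound property are invented for illustration and only mirror what the wrapper checks.

class Model(object):
    def __init__(self, id=None, url=None, manager=None):
        self.id, self.url, self._manager = id, url, manager

    @property
    def _is_bound(self):
        # bound = has an id, a url, and a manager, per the docstring above
        return bool(self.id and self.url and self._manager)

    @is_bound
    def delete(self):
        print("deleting %s" % self.url)

Model(id=1, url='https://example.com/1', manager=object()).delete()  # runs
Model().delete()  # raises ValueError: ... must be bound to call delete()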
michaelaye/pyciss
pyciss/opusapi.py
OPUS.query_image_id
def query_image_id(self, image_id):
    """Query OPUS via the image_id.

    This is a query using the 'primaryfilespec' field of the OPUS database.
    It returns a list of URLs and stores it in the `obsids` attribute.

    This example queries for an image of Titan:

    >>> opus = opusapi.OPUS()
    >>> opus.query_image_id('N1695760475_1')

    After this, one can call `download_results()` to retrieve the found data
    into the standard locations under the database_path defined in
    `.pyciss.yaml` (the config file).
    """
    myquery = {"primaryfilespec": image_id}
    self.create_files_request(myquery, fmt="json")
    self.unpack_json_response()
    return self.obsids
python
[ "def", "query_image_id", "(", "self", ",", "image_id", ")", ":", "myquery", "=", "{", "\"primaryfilespec\"", ":", "image_id", "}", "self", ".", "create_files_request", "(", "myquery", ",", "fmt", "=", "\"json\"", ")", "self", ".", "unpack_json_response", "(", ")", "return", "self", ".", "obsids" ]
Query OPUS via the image_id. This is a query using the 'primaryfilespec' field of the OPUS database. It returns a list of URLs and stores it in the `obsids` attribute. This example queries for an image of Titan: >>> opus = opusapi.OPUS() >>> opus.query_image_id('N1695760475_1') After this, one can call `download_results()` to retrieve the found data into the standard locations under the database_path defined in `.pyciss.yaml` (the config file).
[ "Query", "OPUS", "via", "the", "image_id", "." ]
train
https://github.com/michaelaye/pyciss/blob/019256424466060babead7edab86736c881b0831/pyciss/opusapi.py#L161-L179
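The docstring's own doctest, expanded into a runnable snippet; the import path is an assumption consistent with the opusapi.OPUS() spelling used there.

from pyciss import opusapi

opus = opusapi.OPUS()
obsids = opus.query_image_id('N1695760475_1')  # example id from the docstring
print(len(obsids), "matching observation(s)")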
michaelaye/pyciss
pyciss/opusapi.py
OPUS.create_request_with_query
def create_request_with_query(self, kind, query, size="thumb", fmt="json"):
    """api/data.[fmt], api/images/[size].[fmt], api/files.[fmt]

    kind = ['data', 'images', 'files']
    """
    if kind == "data" or kind == "files":
        url = "{}/{}.{}".format(base_url, kind, fmt)
    elif kind == "images":
        url = "{}/images/{}.{}".format(base_url, size, fmt)
    else:
        # guard against `url` being unbound below
        raise ValueError("kind must be one of 'data', 'images', 'files'")
    self.url = url
    self.r = requests.get(url, params=unquote(urlencode(query)))
python
[ "def", "create_request_with_query", "(", "self", ",", "kind", ",", "query", ",", "size", "=", "\"thumb\"", ",", "fmt", "=", "\"json\"", ")", ":", "if", "kind", "==", "\"data\"", "or", "kind", "==", "\"files\"", ":", "url", "=", "\"{}/{}.{}\"", ".", "format", "(", "base_url", ",", "kind", ",", "fmt", ")", "elif", "kind", "==", "\"images\"", ":", "url", "=", "\"{}/images/{}.{}\"", ".", "format", "(", "base_url", ",", "size", ",", "fmt", ")", "self", ".", "url", "=", "url", "self", ".", "r", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "unquote", "(", "urlencode", "(", "query", ")", ")", ")" ]
api/data.[fmt], api/images/[size].[fmt], api/files.[fmt]; kind = ['data', 'images', 'files']
[ "api", "/", "data", ".", "[", "fmt", "]", "api", "/", "images", "/", "[", "size", "]", ".", "[", "fmt", "]", "api", "/", "files", ".", "[", "fmt", "]" ]
train
https://github.com/michaelaye/pyciss/blob/019256424466060babead7edab86736c881b0831/pyciss/opusapi.py#L184-L196
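A sketch of calling create_request_with_query directly, continuing with the opus instance from the previous sketch and reusing the 'primaryfilespec' field from query_image_id above; the response object lands on self.r.

# Ask for thumbnail images matching an image id.
opus.create_request_with_query('images',
                               {'primaryfilespec': 'N1695760475_1'},
                               size='thumb')
print(opus.url, opus.r.status_code)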
michaelaye/pyciss
pyciss/opusapi.py
OPUS.get_between_times
def get_between_times(self, t1, t2, target=None):
    """
    Query for OPUS data between times t1 and t2.

    Parameters
    ----------
    t1, t2 : datetime.datetime, strings
        Start and end time for the query. If type is datetime, it will be
        converted to an isoformat string. If type is string already, it needs
        to be in an accepted international format for time strings.
    target : str
        Potential target for the observation query. Most likely will reduce
        the amount of data matching the query a lot.

    Returns
    -------
    None, but sets the state of the object to have the new query results
    stored in self.obsids.
    """
    try:
        # checking if times have isoformat() method (datetimes have)
        t1 = t1.isoformat()
        t2 = t2.isoformat()
    except AttributeError:
        # if not, should already be a string, so do nothing.
        pass
    myquery = self._get_time_query(t1, t2)
    if target is not None:
        myquery["target"] = target
    self.create_files_request(myquery, fmt="json")
    self.unpack_json_response()
python
[ "def", "get_between_times", "(", "self", ",", "t1", ",", "t2", ",", "target", "=", "None", ")", ":", "try", ":", "# checking if times have isoformat() method (datetimes have)", "t1", "=", "t1", ".", "isoformat", "(", ")", "t2", "=", "t2", ".", "isoformat", "(", ")", "except", "AttributeError", ":", "# if not, should already be a string, so do nothing.", "pass", "myquery", "=", "self", ".", "_get_time_query", "(", "t1", ",", "t2", ")", "if", "target", "is", "not", "None", ":", "myquery", "[", "\"target\"", "]", "=", "target", "self", ".", "create_files_request", "(", "myquery", ",", "fmt", "=", "\"json\"", ")", "self", ".", "unpack_json_response", "(", ")" ]
Query for OPUS data between times t1 and t2. Parameters ---------- t1, t2 : datetime.datetime, strings Start and end time for the query. If type is datetime, it will be converted to an isoformat string. If type is string already, it needs to be in an accepted international format for time strings. target : str Potential target for the observation query. Most likely will reduce the amount of data matching the query a lot. Returns ------- None, but sets the state of the object to have the new query results stored in self.obsids.
[ "Query", "for", "OPUS", "data", "between", "times", "t1", "and", "t2", "." ]
train
https://github.com/michaelaye/pyciss/blob/019256424466060babead7edab86736c881b0831/pyciss/opusapi.py#L251-L281
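A sketch of a time-window query; datetimes are converted to ISO strings internally, and the 'SATURN' target value is illustrative, not confirmed by the record.

from datetime import datetime

opus = opusapi.OPUS()
opus.get_between_times(datetime(2005, 10, 1), datetime(2005, 10, 3),
                       target='SATURN')  # hypothetical target name
print(len(opus.obsids), "observations in the window")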
michaelaye/pyciss
pyciss/opusapi.py
OPUS.show_images
def show_images(self, size="small"):
    """Shows preview images using the Jupyter notebook HTML display.

    Parameters
    ==========
    size : {'small', 'med', 'thumb', 'full'}
        Determines the size of the preview image to be shown.
    """
    d = dict(small=256, med=512, thumb=100, full=1024)
    try:
        width = d[size]
    except KeyError:
        print("Allowed keys:", d.keys())
        return
    img_urls = [i._get_img_url(size) for i in self.obsids]
    imagesList = "".join(
        [
            "<img style='width: {0}px; margin: 0px; float: "
            "left; border: 1px solid black;' "
            "src='{1}' />".format(width, s)
            for s in img_urls
        ]
    )
    display(HTML(imagesList))
python
[ "def", "show_images", "(", "self", ",", "size", "=", "\"small\"", ")", ":", "d", "=", "dict", "(", "small", "=", "256", ",", "med", "=", "512", ",", "thumb", "=", "100", ",", "full", "=", "1024", ")", "try", ":", "width", "=", "d", "[", "size", "]", "except", "KeyError", ":", "print", "(", "\"Allowed keys:\"", ",", "d", ".", "keys", "(", ")", ")", "return", "img_urls", "=", "[", "i", ".", "_get_img_url", "(", "size", ")", "for", "i", "in", "self", ".", "obsids", "]", "imagesList", "=", "\"\"", ".", "join", "(", "[", "\"<img style='width: {0}px; margin: 0px; float: \"", "\"left; border: 1px solid black;' \"", "\"src='{1}' />\"", ".", "format", "(", "width", ",", "s", ")", "for", "s", "in", "img_urls", "]", ")", "display", "(", "HTML", "(", "imagesList", ")", ")" ]
Shows preview images using the Jupyter notebook HTML display. Parameters ========== size : {'small', 'med', 'thumb', 'full'} Determines the size of the preview image to be shown.
[ "Shows", "preview", "images", "using", "the", "Jupyter", "notebook", "HTML", "display", "." ]
train
https://github.com/michaelaye/pyciss/blob/019256424466060babead7edab86736c881b0831/pyciss/opusapi.py#L288-L311
michaelaye/pyciss
pyciss/opusapi.py
OPUS.download_results
def download_results(self, savedir=None, raw=True, calib=False, index=None):
    """Download the previously found and stored Opus obsids.

    Parameters
    ==========
    savedir: str or pathlib.Path, optional
        If the database root folder as defined by the config.ini should not
        be used, provide a different savedir here. It will be handed to
        PathManager.
    """
    obsids = self.obsids if index is None else [self.obsids[index]]
    for obsid in obsids:
        pm = io.PathManager(obsid.img_id, savedir=savedir)
        pm.basepath.mkdir(exist_ok=True)
        to_download = []
        if raw is True:
            to_download.extend(obsid.raw_urls)
        if calib is True:
            to_download.extend(obsid.calib_urls)
        for url in to_download:
            basename = Path(url).name
            print("Downloading", basename)
            store_path = str(pm.basepath / basename)
            try:
                urlretrieve(url, store_path)
            except Exception:
                # retry over plain http if the https download fails
                urlretrieve(url.replace("https", "http"), store_path)
    return str(pm.basepath)
python
[ "def", "download_results", "(", "self", ",", "savedir", "=", "None", ",", "raw", "=", "True", ",", "calib", "=", "False", ",", "index", "=", "None", ")", ":", "obsids", "=", "self", ".", "obsids", "if", "index", "is", "None", "else", "[", "self", ".", "obsids", "[", "index", "]", "]", "for", "obsid", "in", "obsids", ":", "pm", "=", "io", ".", "PathManager", "(", "obsid", ".", "img_id", ",", "savedir", "=", "savedir", ")", "pm", ".", "basepath", ".", "mkdir", "(", "exist_ok", "=", "True", ")", "to_download", "=", "[", "]", "if", "raw", "is", "True", ":", "to_download", ".", "extend", "(", "obsid", ".", "raw_urls", ")", "if", "calib", "is", "True", ":", "to_download", ".", "extend", "(", "obsid", ".", "calib_urls", ")", "for", "url", "in", "to_download", ":", "basename", "=", "Path", "(", "url", ")", ".", "name", "print", "(", "\"Downloading\"", ",", "basename", ")", "store_path", "=", "str", "(", "pm", ".", "basepath", "/", "basename", ")", "try", ":", "urlretrieve", "(", "url", ",", "store_path", ")", "except", "Exception", "as", "e", ":", "urlretrieve", "(", "url", ".", "replace", "(", "\"https\"", ",", "\"http\"", ")", ",", "store_path", ")", "return", "str", "(", "pm", ".", "basepath", ")" ]
Download the previously found and stored Opus obsids. Parameters ========== savedir: str or pathlib.Path, optional If the database root folder as defined by the config.ini should not be used, provide a different savedir here. It will be handed to PathManager.
[ "Download", "the", "previously", "found", "and", "stored", "Opus", "obsids", "." ]
train
https://github.com/michaelaye/pyciss/blob/019256424466060babead7edab86736c881b0831/pyciss/opusapi.py#L313-L339
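A sketch chaining a query with a download, continuing with the opus instance from the previous sketch; the parameters come from the signature above, with savedir and the index into obsids chosen for illustration.

opus.query_image_id('N1695760475_1')
# Fetch raw and calibrated files for the first result only.
folder = opus.download_results(savedir='/tmp/ciss', raw=True, calib=True, index=0)
print("files stored under", folder)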
michaelaye/pyciss
pyciss/opusapi.py
OPUS.download_previews
def download_previews(self, savedir=None):
    """Download preview files for the previously found and stored Opus obsids.

    Parameters
    ==========
    savedir: str or pathlib.Path, optional
        If the database root folder as defined by the config.ini should not
        be used, provide a different savedir here. It will be handed to
        PathManager.
    """
    for obsid in self.obsids:
        pm = io.PathManager(obsid.img_id, savedir=savedir)
        pm.basepath.mkdir(exist_ok=True)
        basename = Path(obsid.medium_img_url).name
        print("Downloading", basename)
        urlretrieve(obsid.medium_img_url, str(pm.basepath / basename))
python
[ "def", "download_previews", "(", "self", ",", "savedir", "=", "None", ")", ":", "for", "obsid", "in", "self", ".", "obsids", ":", "pm", "=", "io", ".", "PathManager", "(", "obsid", ".", "img_id", ",", "savedir", "=", "savedir", ")", "pm", ".", "basepath", ".", "mkdir", "(", "exist_ok", "=", "True", ")", "basename", "=", "Path", "(", "obsid", ".", "medium_img_url", ")", ".", "name", "print", "(", "\"Downloading\"", ",", "basename", ")", "urlretrieve", "(", "obsid", ".", "medium_img_url", ",", "str", "(", "pm", ".", "basepath", "/", "basename", ")", ")" ]
Download preview files for the previously found and stored Opus obsids. Parameters ========== savedir: str or pathlib.Path, optional If the database root folder as defined by the config.ini should not be used, provide a different savedir here. It will be handed to PathManager.
[ "Download", "preview", "files", "for", "the", "previously", "found", "and", "stored", "Opus", "obsids", "." ]
train
https://github.com/michaelaye/pyciss/blob/019256424466060babead7edab86736c881b0831/pyciss/opusapi.py#L341-L355
michaelaye/pyciss
pyciss/_utils.py
which_epi_janus_resonance
def which_epi_janus_resonance(name, time):
    """Find which swap situation we are in by time.

    Starting from 2006-01-21, when a Janus-Epimetheus swap occurred, and
    defining the next 4 years until the next swap as `scenario1`, and the
    4 years after that as `scenario2`, calculate in which scenario the
    given time falls, in units of 4 years.

    Parameters
    ----------
    time : timestring, datetime
        Time of the image. The astropy Time object can deal with both formats.

    Returns
    -------
    str
        The given name string (either `janus` or `epimetheus`) with a 1 or 2
        attached, as appropriate.
    """
    t1 = Time('2002-01-21').to_datetime()
    delta = Time(time).to_datetime() - t1
    yearfraction = delta.days / 365
    if int(yearfraction / 4) % 2 == 0:
        return name + '2'
    else:
        return name + '1'
python
[ "def", "which_epi_janus_resonance", "(", "name", ",", "time", ")", ":", "t1", "=", "Time", "(", "'2002-01-21'", ")", ".", "to_datetime", "(", ")", "delta", "=", "Time", "(", "time", ")", ".", "to_datetime", "(", ")", "-", "t1", "yearfraction", "=", "delta", ".", "days", "/", "365", "if", "int", "(", "yearfraction", "/", "4", ")", "%", "2", "==", "0", ":", "return", "name", "+", "'2'", "else", ":", "return", "name", "+", "'1'" ]
Find which swap situation we are in by time. Starting from 2006-01-21, when a Janus-Epimetheus swap occurred, and defining the next 4 years until the next swap as `scenario1`, and the 4 years after that as `scenario2`, calculate in which scenario the given time falls, in units of 4 years. Parameters ---------- time : timestring, datetime Time of the image. The astropy Time object can deal with both formats. Returns ------- str The given name string (either `janus` or `epimetheus`) with a 1 or 2 attached, as appropriate.
[ "Find", "which", "swap", "situtation", "we", "are", "in", "by", "time", "." ]
train
https://github.com/michaelaye/pyciss/blob/019256424466060babead7edab86736c881b0831/pyciss/_utils.py#L4-L29
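The scenario arithmetic can be checked without astropy: the function counts whole days since the 2002-01-21 epoch used in the code, converts to years, and flips the suffix every 4 years. A standalone sketch of the same rule:

from datetime import datetime

def swap_scenario(name, time):
    # Same arithmetic as which_epi_janus_resonance, with plain datetimes.
    delta = datetime.fromisoformat(time) - datetime(2002, 1, 21)
    yearfraction = delta.days / 365
    return name + ('2' if int(yearfraction / 4) % 2 == 0 else '1')

print(swap_scenario('janus', '2003-06-01'))  # janus2 (first 4-year window)
print(swap_scenario('janus', '2007-06-01'))  # janus1 (second window)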
koordinates/python-client
koordinates/layers.py
LayerManager.list_drafts
def list_drafts(self):
    """
    A filterable list view of layers, returning the draft version of each layer.
    If the most recent version of a layer or table has been published already,
    it won’t be returned here.
    """
    target_url = self.client.get_url('LAYER', 'GET', 'multidraft')
    return base.Query(self, target_url)
python
[ "def", "list_drafts", "(", "self", ")", ":", "target_url", "=", "self", ".", "client", ".", "get_url", "(", "'LAYER'", ",", "'GET'", ",", "'multidraft'", ")", "return", "base", ".", "Query", "(", "self", ",", "target_url", ")" ]
A filterable list view of layers, returning the draft version of each layer. If the most recent version of a layer or table has been published already, it won’t be returned here.
[ "A", "filterable", "list", "views", "of", "layers", "returning", "the", "draft", "version", "of", "each", "layer", ".", "If", "the", "most", "recent", "version", "of", "a", "layer", "or", "table", "has", "been", "published", "already", "it", "won’t", "be", "returned", "here", "." ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/layers.py#L42-L49
koordinates/python-client
koordinates/layers.py
LayerManager.list_versions
def list_versions(self, layer_id):
    """
    Filterable list of versions of a layer, always ordered newest to oldest.

    If the version’s source supports revisions, you can get a specific revision
    using ``.filter(data__source__revision=value)``. Specific values depend on
    the source type. Use ``data__source_revision__lt`` or
    ``data__source_revision__gte`` to filter using ``<`` or ``>=`` operators
    respectively.
    """
    target_url = self.client.get_url('VERSION', 'GET', 'multi', {'layer_id': layer_id})
    return base.Query(self, target_url, valid_filter_attributes=('data',),
                      valid_sort_attributes=())
python
[ "def", "list_versions", "(", "self", ",", "layer_id", ")", ":", "target_url", "=", "self", ".", "client", ".", "get_url", "(", "'VERSION'", ",", "'GET'", ",", "'multi'", ",", "{", "'layer_id'", ":", "layer_id", "}", ")", "return", "base", ".", "Query", "(", "self", ",", "target_url", ",", "valid_filter_attributes", "=", "(", "'data'", ",", ")", ",", "valid_sort_attributes", "=", "(", ")", ")" ]
Filterable list of versions of a layer, always ordered newest to oldest. If the version’s source supports revisions, you can get a specific revision using ``.filter(data__source__revision=value)``. Specific values depend on the source type. Use ``data__source_revision__lt`` or ``data__source_revision__gte`` to filter using ``<`` or ``>=`` operators respectively.
[ "Filterable", "list", "of", "versions", "of", "a", "layer", "always", "ordered", "newest", "to", "oldest", "." ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/layers.py#L61-L71
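A sketch of listing and filtering versions, assuming the LayerManager is reachable as client.layers (an illustrative name) and using the filter spelling given in the docstring above.

# Iterate all versions of a hypothetical layer 123, newest first.
for version in client.layers.list_versions(123):
    print(version.id)

# Narrow to a specific source revision, per the docstring.
q = client.layers.list_versions(123).filter(data__source__revision=42)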
koordinates/python-client
koordinates/layers.py
LayerManager.get_version
def get_version(self, layer_id, version_id, expand=[]):
    """
    Get a specific version of a layer.
    """
    target_url = self.client.get_url('VERSION', 'GET', 'single',
                                     {'layer_id': layer_id, 'version_id': version_id})
    return self._get(target_url, expand=expand)
python
[ "def", "get_version", "(", "self", ",", "layer_id", ",", "version_id", ",", "expand", "=", "[", "]", ")", ":", "target_url", "=", "self", ".", "client", ".", "get_url", "(", "'VERSION'", ",", "'GET'", ",", "'single'", ",", "{", "'layer_id'", ":", "layer_id", ",", "'version_id'", ":", "version_id", "}", ")", "return", "self", ".", "_get", "(", "target_url", ",", "expand", "=", "expand", ")" ]
Get a specific version of a layer.
[ "Get", "a", "specific", "version", "of", "a", "layer", "." ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/layers.py#L73-L78
koordinates/python-client
koordinates/layers.py
LayerManager.get_draft
def get_draft(self, layer_id, expand=[]):
    """
    Get the current draft version of a layer.

    :raises NotFound: if there is no draft version.
    """
    target_url = self.client.get_url('VERSION', 'GET', 'draft', {'layer_id': layer_id})
    return self._get(target_url, expand=expand)
python
[ "def", "get_draft", "(", "self", ",", "layer_id", ",", "expand", "=", "[", "]", ")", ":", "target_url", "=", "self", ".", "client", ".", "get_url", "(", "'VERSION'", ",", "'GET'", ",", "'draft'", ",", "{", "'layer_id'", ":", "layer_id", "}", ")", "return", "self", ".", "_get", "(", "target_url", ",", "expand", "=", "expand", ")" ]
Get the current draft version of a layer. :raises NotFound: if there is no draft version.
[ "Get", "the", "current", "draft", "version", "of", "a", "layer", ".", ":", "raises", "NotFound", ":", "if", "there", "is", "no", "draft", "version", "." ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/layers.py#L80-L86
koordinates/python-client
koordinates/layers.py
LayerManager.get_published
def get_published(self, layer_id, expand=[]):
    """
    Get the latest published version of this layer.

    :raises NotFound: if there is no published version.
    """
    target_url = self.client.get_url('VERSION', 'GET', 'published', {'layer_id': layer_id})
    return self._get(target_url, expand=expand)
python
[ "def", "get_published", "(", "self", ",", "layer_id", ",", "expand", "=", "[", "]", ")", ":", "target_url", "=", "self", ".", "client", ".", "get_url", "(", "'VERSION'", ",", "'GET'", ",", "'published'", ",", "{", "'layer_id'", ":", "layer_id", "}", ")", "return", "self", ".", "_get", "(", "target_url", ",", "expand", "=", "expand", ")" ]
Get the latest published version of this layer. :raises NotFound: if there is no published version.
[ "Get", "the", "latest", "published", "version", "of", "this", "layer", ".", ":", "raises", "NotFound", ":", "if", "there", "is", "no", "published", "version", "." ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/layers.py#L88-L94
koordinates/python-client
koordinates/layers.py
LayerManager.create_draft
def create_draft(self, layer_id):
    """
    Creates a new draft version.

    If anything in the data object has changed then an import will begin
    immediately. Otherwise, to force a re-import from the previous sources,
    call :py:meth:`koordinates.layers.LayerManager.start_import`.

    :rtype: Layer
    :return: the new version
    :raises Conflict: if there is already a draft version for this layer.
    """
    target_url = self.client.get_url('VERSION', 'POST', 'create', {'layer_id': layer_id})
    r = self.client.request('POST', target_url, json={})
    return self.create_from_result(r.json())
python
[ "def", "create_draft", "(", "self", ",", "layer_id", ")", ":", "target_url", "=", "self", ".", "client", ".", "get_url", "(", "'VERSION'", ",", "'POST'", ",", "'create'", ",", "{", "'layer_id'", ":", "layer_id", "}", ")", "r", "=", "self", ".", "client", ".", "request", "(", "'POST'", ",", "target_url", ",", "json", "=", "{", "}", ")", "return", "self", ".", "create_from_result", "(", "r", ".", "json", "(", ")", ")" ]
Creates a new draft version. If anything in the data object has changed then an import will begin immediately. Otherwise, to force a re-import from the previous sources, call :py:meth:`koordinates.layers.LayerManager.start_import`. :rtype: Layer :return: the new version :raises Conflict: if there is already a draft version for this layer.
[ "Creates", "a", "new", "draft", "version", "." ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/layers.py#L96-L109
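A sketch of draft creation that tolerates an existing draft; the Conflict name comes from the docstring above, but the exceptions import path is an assumption.

from koordinates import exceptions

try:
    draft = client.layers.create_draft(123)   # hypothetical layer id
except exceptions.Conflict:
    draft = client.layers.get_draft(123)      # a draft already existed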
koordinates/python-client
koordinates/layers.py
LayerManager.start_import
def start_import(self, layer_id, version_id):
    """
    Starts importing the specified draft version (cancelling any running
    import), even if the data object hasn’t changed from the previous version.
    """
    target_url = self.client.get_url('VERSION', 'POST', 'import',
                                     {'layer_id': layer_id, 'version_id': version_id})
    r = self.client.request('POST', target_url, json={})
    return self.create_from_result(r.json())
python
[ "def", "start_import", "(", "self", ",", "layer_id", ",", "version_id", ")", ":", "target_url", "=", "self", ".", "client", ".", "get_url", "(", "'VERSION'", ",", "'POST'", ",", "'import'", ",", "{", "'layer_id'", ":", "layer_id", ",", "'version_id'", ":", "version_id", "}", ")", "r", "=", "self", ".", "client", ".", "request", "(", "'POST'", ",", "target_url", ",", "json", "=", "{", "}", ")", "return", "self", ".", "create_from_result", "(", "r", ".", "json", "(", ")", ")" ]
Starts importing the specified draft version (cancelling any running import), even if the data object hasn’t changed from the previous version.
[ "Starts", "importing", "the", "specified", "draft", "version", "(", "cancelling", "any", "running", "import", ")", "even", "if", "the", "data", "object", "hasn’t", "changed", "from", "the", "previous", "version", "." ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/layers.py#L111-L118
koordinates/python-client
koordinates/layers.py
LayerManager.start_update
def start_update(self, layer_id):
    """
    A shortcut to create a new version and start importing it.
    Effectively the same as :py:meth:`koordinates.layers.LayerManager.create_draft`
    followed by :py:meth:`koordinates.layers.LayerManager.start_import`.
    """
    target_url = self.client.get_url('LAYER', 'POST', 'update', {'layer_id': layer_id})
    r = self.client.request('POST', target_url, json={})
    return self.parent.create_from_result(r.json())
python
[ "def", "start_update", "(", "self", ",", "layer_id", ")", ":", "target_url", "=", "self", ".", "client", ".", "get_url", "(", "'LAYER'", ",", "'POST'", ",", "'update'", ",", "{", "'layer_id'", ":", "layer_id", "}", ")", "r", "=", "self", ".", "client", ".", "request", "(", "'POST'", ",", "target_url", ",", "json", "=", "{", "}", ")", "return", "self", ".", "parent", ".", "create_from_result", "(", "r", ".", "json", "(", ")", ")" ]
A shortcut to create a new version and start importing it. Effectively the same as :py:meth:`koordinates.layers.LayerManager.create_draft` followed by :py:meth:`koordinates.layers.LayerManager.start_import`.
[ "A", "shortcut", "to", "create", "a", "new", "version", "and", "start", "importing", "it", ".", "Effectively", "the", "same", "as", ":", "py", ":", "meth", ":", "koordinates", ".", "layers", ".", "LayerManager", ".", "create_draft", "followed", "by", ":", "py", ":", "meth", ":", "koordinates", ".", "layers", ".", "LayerManager", ".", "start_import", "." ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/layers.py#L120-L127
koordinates/python-client
koordinates/layers.py
LayerManager.set_metadata
def set_metadata(self, layer_id, version_id, fp):
    """
    Set the XML metadata on a layer draft version.

    :param file fp: file-like object to read the XML metadata from.
    :raises NotAllowed: if the version is already published.
    """
    base_url = self.client.get_url('VERSION', 'GET', 'single',
                                   {'layer_id': layer_id, 'version_id': version_id})
    self._metadata.set(base_url, fp)
python
[ "def", "set_metadata", "(", "self", ",", "layer_id", ",", "version_id", ",", "fp", ")", ":", "base_url", "=", "self", ".", "client", ".", "get_url", "(", "'VERSION'", ",", "'GET'", ",", "'single'", ",", "{", "'layer_id'", ":", "layer_id", ",", "'version_id'", ":", "version_id", "}", ")", "self", ".", "_metadata", ".", "set", "(", "base_url", ",", "fp", ")" ]
Set the XML metadata on a layer draft version. :param file fp: file-like object to read the XML metadata from. :raises NotAllowed: if the version is already published.
[ "Set", "the", "XML", "metadata", "on", "a", "layer", "draft", "version", "." ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/layers.py#L129-L137
koordinates/python-client
koordinates/layers.py
Layer.is_published_version
def is_published_version(self):
    """ Return if this version is the published version of a layer """
    pub_ver = getattr(self, 'published_version', None)
    this_ver = getattr(self, 'this_version', None)
    return this_ver and pub_ver and (this_ver == pub_ver)
python
[ "def", "is_published_version", "(", "self", ")", ":", "pub_ver", "=", "getattr", "(", "self", ",", "'published_version'", ",", "None", ")", "this_ver", "=", "getattr", "(", "self", ",", "'this_version'", ",", "None", ")", "return", "this_ver", "and", "pub_ver", "and", "(", "this_ver", "==", "pub_ver", ")" ]
Return if this version is the published version of a layer
[ "Return", "if", "this", "version", "is", "the", "published", "version", "of", "a", "layer" ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/layers.py#L172-L176
koordinates/python-client
koordinates/layers.py
Layer.is_draft_version
def is_draft_version(self):
    """ Return if this version is the draft version of a layer """
    pub_ver = getattr(self, 'published_version', None)
    latest_ver = getattr(self, 'latest_version', None)
    this_ver = getattr(self, 'this_version', None)
    return this_ver and latest_ver and (this_ver == latest_ver) and (latest_ver != pub_ver)
python
[ "def", "is_draft_version", "(", "self", ")", ":", "pub_ver", "=", "getattr", "(", "self", ",", "'published_version'", ",", "None", ")", "latest_ver", "=", "getattr", "(", "self", ",", "'latest_version'", ",", "None", ")", "this_ver", "=", "getattr", "(", "self", ",", "'this_version'", ",", "None", ")", "return", "this_ver", "and", "latest_ver", "and", "(", "this_ver", "==", "latest_ver", ")", "and", "(", "latest_ver", "!=", "pub_ver", ")" ]
Return if this version is the draft version of a layer
[ "Return", "if", "this", "version", "is", "the", "draft", "version", "of", "a", "layer" ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/layers.py#L179-L184
koordinates/python-client
koordinates/layers.py
Layer.list_versions
def list_versions(self):
    """
    Filterable list of versions of a layer, always ordered newest to oldest.

    If the version’s source supports revisions, you can get a specific revision
    using ``.filter(data__source__revision=value)``. Specific values depend on
    the source type. Use ``data__source_revision__lt`` or
    ``data__source_revision__gte`` to filter using ``<`` or ``>=`` operators
    respectively.
    """
    target_url = self._client.get_url('VERSION', 'GET', 'multi', {'layer_id': self.id})
    return base.Query(self._manager, target_url, valid_filter_attributes=('data',),
                      valid_sort_attributes=())
python
[ "def", "list_versions", "(", "self", ")", ":", "target_url", "=", "self", ".", "_client", ".", "get_url", "(", "'VERSION'", ",", "'GET'", ",", "'multi'", ",", "{", "'layer_id'", ":", "self", ".", "id", "}", ")", "return", "base", ".", "Query", "(", "self", ".", "_manager", ",", "target_url", ",", "valid_filter_attributes", "=", "(", "'data'", ",", ")", ",", "valid_sort_attributes", "=", "(", ")", ")" ]
Filterable list of versions of a layer, always ordered newest to oldest. If the version’s source supports revisions, you can get a specific revision using ``.filter(data__source__revision=value)``. Specific values depend on the source type. Use ``data__source_revision__lt`` or ``data__source_revision__gte`` to filter using ``<`` or ``>=`` operators respectively.
[ "Filterable", "list", "of", "versions", "of", "a", "layer", "always", "ordered", "newest", "to", "oldest", "." ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/layers.py#L187-L197
koordinates/python-client
koordinates/layers.py
Layer.get_version
def get_version(self, version_id, expand=[]):
    """
    Get a specific version of this layer
    """
    target_url = self._client.get_url('VERSION', 'GET', 'single',
                                      {'layer_id': self.id, 'version_id': version_id})
    return self._manager._get(target_url, expand=expand)
python
[ "def", "get_version", "(", "self", ",", "version_id", ",", "expand", "=", "[", "]", ")", ":", "target_url", "=", "self", ".", "_client", ".", "get_url", "(", "'VERSION'", ",", "'GET'", ",", "'single'", ",", "{", "'layer_id'", ":", "self", ".", "id", ",", "'version_id'", ":", "version_id", "}", ")", "return", "self", ".", "_manager", ".", "_get", "(", "target_url", ",", "expand", "=", "expand", ")" ]
Get a specific version of this layer
[ "Get", "a", "specific", "version", "of", "this", "layer" ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/layers.py#L200-L205
koordinates/python-client
koordinates/layers.py
Layer.get_draft_version
def get_draft_version(self, expand=[]):
    """
    Get the current draft version of this layer.

    :raises NotFound: if there is no draft version.
    """
    target_url = self._client.get_url('VERSION', 'GET', 'draft', {'layer_id': self.id})
    return self._manager._get(target_url, expand=expand)
python
[ "def", "get_draft_version", "(", "self", ",", "expand", "=", "[", "]", ")", ":", "target_url", "=", "self", ".", "_client", ".", "get_url", "(", "'VERSION'", ",", "'GET'", ",", "'draft'", ",", "{", "'layer_id'", ":", "self", ".", "id", "}", ")", "return", "self", ".", "_manager", ".", "_get", "(", "target_url", ",", "expand", "=", "expand", ")" ]
Get the current draft version of this layer. :raises NotFound: if there is no draft version.
[ "Get", "the", "current", "draft", "version", "of", "this", "layer", ".", ":", "raises", "NotFound", ":", "if", "there", "is", "no", "draft", "version", "." ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/layers.py#L208-L214
koordinates/python-client
koordinates/layers.py
Layer.start_import
def start_import(self, version_id=None):
    """
    Starts importing this draft layer version (cancelling any running import),
    even if the data object hasn’t changed from the previous version.

    :raises Conflict: if this version is already published.
    """
    if not version_id:
        version_id = self.version.id

    target_url = self._client.get_url('VERSION', 'POST', 'import',
                                      {'layer_id': self.id, 'version_id': version_id})
    r = self._client.request('POST', target_url, json={})
    return self._deserialize(r.json(), self._manager)
python
[ "def", "start_import", "(", "self", ",", "version_id", "=", "None", ")", ":", "if", "not", "version_id", ":", "version_id", "=", "self", ".", "version", ".", "id", "target_url", "=", "self", ".", "_client", ".", "get_url", "(", "'VERSION'", ",", "'POST'", ",", "'import'", ",", "{", "'layer_id'", ":", "self", ".", "id", ",", "'version_id'", ":", "version_id", "}", ")", "r", "=", "self", ".", "_client", ".", "request", "(", "'POST'", ",", "target_url", ",", "json", "=", "{", "}", ")", "return", "self", ".", "_deserialize", "(", "r", ".", "json", "(", ")", ",", "self", ".", "_manager", ")" ]
Starts importing this draft layer version (cancelling any running import), even if the data object hasn’t changed from the previous version. :raises Conflict: if this version is already published.
[ "Starts", "importing", "this", "draft", "layerversion", "(", "cancelling", "any", "running", "import", ")", "even", "if", "the", "data", "object", "hasn’t", "changed", "from", "the", "previous", "version", "." ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/layers.py#L242-L254
koordinates/python-client
koordinates/layers.py
Layer.start_update
def start_update(self):
    """
    A shortcut to create a new version and start importing it.
    Effectively the same as :py:meth:`.create_draft_version` followed by
    :py:meth:`koordinates.layers.Layer.start_import`.

    :rtype: Layer
    :return: the new version
    :raises Conflict: if there is already a draft version for this layer.
    """
    target_url = self._client.get_url('LAYER', 'POST', 'update', {'layer_id': self.id})
    r = self._client.request('POST', target_url, json={})
    return self._manager.create_from_result(r.json())
python
[ "def", "start_update", "(", "self", ")", ":", "target_url", "=", "self", ".", "_client", ".", "get_url", "(", "'LAYER'", ",", "'POST'", ",", "'update'", ",", "{", "'layer_id'", ":", "self", ".", "id", "}", ")", "r", "=", "self", ".", "_client", ".", "request", "(", "'POST'", ",", "target_url", ",", "json", "=", "{", "}", ")", "return", "self", ".", "_manager", ".", "create_from_result", "(", "r", ".", "json", "(", ")", ")" ]
A shortcut to create a new version and start importing it. Effectively the same as :py:meth:`.create_draft_version` followed by :py:meth:`koordinates.layers.Layer.start_import`. :rtype: Layer :return: the new version :raises Conflict: if there is already a draft version for this layer.
[ "A", "shortcut", "to", "create", "a", "new", "version", "and", "start", "importing", "it", ".", "Effectively", "the", "same", "as", ":", "py", ":", "meth", ":", ".", "create_draft_version", "followed", "by", ":", "py", ":", "meth", ":", "koordinates", ".", "layers", ".", "Layer", ".", "start_import", "." ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/layers.py#L257-L268
koordinates/python-client
koordinates/layers.py
Layer.publish
def publish(self, version_id=None): """ Creates a publish task just for this version, which publishes as soon as any import is complete. :return: the publish task :rtype: Publish :raises Conflict: If the version is already published, or already has a publish job. """ if not version_id: version_id = self.version.id target_url = self._client.get_url('VERSION', 'POST', 'publish', {'layer_id': self.id, 'version_id': version_id}) r = self._client.request('POST', target_url, json={}) return self._client.get_manager(Publish).create_from_result(r.json())
python
def publish(self, version_id=None): """ Creates a publish task just for this version, which publishes as soon as any import is complete. :return: the publish task :rtype: Publish :raises Conflict: If the version is already published, or already has a publish job. """ if not version_id: version_id = self.version.id target_url = self._client.get_url('VERSION', 'POST', 'publish', {'layer_id': self.id, 'version_id': version_id}) r = self._client.request('POST', target_url, json={}) return self._client.get_manager(Publish).create_from_result(r.json())
[ "def", "publish", "(", "self", ",", "version_id", "=", "None", ")", ":", "if", "not", "version_id", ":", "version_id", "=", "self", ".", "version", ".", "id", "target_url", "=", "self", ".", "_client", ".", "get_url", "(", "'VERSION'", ",", "'POST'", ",", "'publish'", ",", "{", "'layer_id'", ":", "self", ".", "id", ",", "'version_id'", ":", "version_id", "}", ")", "r", "=", "self", ".", "_client", ".", "request", "(", "'POST'", ",", "target_url", ",", "json", "=", "{", "}", ")", "return", "self", ".", "_client", ".", "get_manager", "(", "Publish", ")", ".", "create_from_result", "(", "r", ".", "json", "(", ")", ")" ]
Creates a publish task just for this version, which publishes as soon as any import is complete. :return: the publish task :rtype: Publish :raises Conflict: If the version is already published, or already has a publish job.
[ "Creates", "a", "publish", "task", "just", "for", "this", "version", "which", "publishes", "as", "soon", "as", "any", "import", "is", "complete", "." ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/layers.py#L271-L284
koordinates/python-client
koordinates/layers.py
Layer.save
def save(self, with_data=False): """ Edits this draft layer version. If anything in the data object has changed, cancel any existing import and start a new one. :param bool with_data: if ``True``, send the data object, which will start a new import and cancel any existing one. If ``False``, the data object will *not* be sent, and no import will start. :raises NotAllowed: if the version is already published. """ target_url = self._client.get_url('VERSION', 'PUT', 'edit', {'layer_id': self.id, 'version_id': self.version.id}) r = self._client.request('PUT', target_url, json=self._serialize(with_data=with_data)) return self._deserialize(r.json(), self._manager)
python
def save(self, with_data=False): """ Edits this draft layer version. If anything in the data object has changed, cancel any existing import and start a new one. :param bool with_data: if ``True``, send the data object, which will start a new import and cancel any existing one. If ``False``, the data object will *not* be sent, and no import will start. :raises NotAllowed: if the version is already published. """ target_url = self._client.get_url('VERSION', 'PUT', 'edit', {'layer_id': self.id, 'version_id': self.version.id}) r = self._client.request('PUT', target_url, json=self._serialize(with_data=with_data)) return self._deserialize(r.json(), self._manager)
[ "def", "save", "(", "self", ",", "with_data", "=", "False", ")", ":", "target_url", "=", "self", ".", "_client", ".", "get_url", "(", "'VERSION'", ",", "'PUT'", ",", "'edit'", ",", "{", "'layer_id'", ":", "self", ".", "id", ",", "'version_id'", ":", "self", ".", "version", ".", "id", "}", ")", "r", "=", "self", ".", "_client", ".", "request", "(", "'PUT'", ",", "target_url", ",", "json", "=", "self", ".", "_serialize", "(", "with_data", "=", "with_data", ")", ")", "return", "self", ".", "_deserialize", "(", "r", ".", "json", "(", ")", ",", "self", ".", "_manager", ")" ]
Edits this draft layer version. If anything in the data object has changed, cancel any existing import and start a new one. :param bool with_data: if ``True``, send the data object, which will start a new import and cancel any existing one. If ``False``, the data object will *not* be sent, and no import will start. :raises NotAllowed: if the version is already published.
[ "Edits", "this", "draft", "layerversion", ".", "#", "If", "anything", "in", "the", "data", "object", "has", "changed", "cancel", "any", "existing", "import", "and", "start", "a", "new", "one", "." ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/layers.py#L287-L298
koordinates/python-client
koordinates/layers.py
Layer.delete_version
def delete_version(self, version_id=None): """ Deletes this draft version (revert to published) :raises NotAllowed: if this version is already published. :raises Conflict: if this version is already deleted. """ if not version_id: version_id = self.version.id target_url = self._client.get_url('VERSION', 'DELETE', 'single', {'layer_id': self.id, 'version_id': version_id}) r = self._client.request('DELETE', target_url) logger.info("delete_version(): %s", r.status_code)
python
def delete_version(self, version_id=None): """ Deletes this draft version (revert to published) :raises NotAllowed: if this version is already published. :raises Conflict: if this version is already deleted. """ if not version_id: version_id = self.version.id target_url = self._client.get_url('VERSION', 'DELETE', 'single', {'layer_id': self.id, 'version_id': version_id}) r = self._client.request('DELETE', target_url) logger.info("delete_version(): %s", r.status_code)
[ "def", "delete_version", "(", "self", ",", "version_id", "=", "None", ")", ":", "if", "not", "version_id", ":", "version_id", "=", "self", ".", "version", ".", "id", "target_url", "=", "self", ".", "_client", ".", "get_url", "(", "'VERSION'", ",", "'DELETE'", ",", "'single'", ",", "{", "'layer_id'", ":", "self", ".", "id", ",", "'version_id'", ":", "version_id", "}", ")", "r", "=", "self", ".", "_client", ".", "request", "(", "'DELETE'", ",", "target_url", ")", "logger", ".", "info", "(", "\"delete_version(): %s\"", ",", "r", ".", "status_code", ")" ]
Deletes this draft version (revert to published) :raises NotAllowed: if this version is already published. :raises Conflict: if this version is already deleted.
[ "Deletes", "this", "draft", "version", "(", "revert", "to", "published", ")" ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/layers.py#L301-L313
koordinates/python-client
koordinates/catalog.py
CatalogManager._get_item_class
def _get_item_class(self, url): """ Return the model class matching a URL """ if '/layers/' in url: return Layer elif '/tables/' in url: return Table elif '/sets/' in url: return Set # elif '/documents/' in url: # return Document else: raise NotImplementedError("No support for catalog results of type %s" % url)
python
def _get_item_class(self, url): """ Return the model class matching a URL """ if '/layers/' in url: return Layer elif '/tables/' in url: return Table elif '/sets/' in url: return Set # elif '/documents/' in url: # return Document else: raise NotImplementedError("No support for catalog results of type %s" % url)
[ "def", "_get_item_class", "(", "self", ",", "url", ")", ":", "if", "'/layers/'", "in", "url", ":", "return", "Layer", "elif", "'/tables/'", "in", "url", ":", "return", "Table", "elif", "'/sets/'", "in", "url", ":", "return", "Set", "# elif '/documents/' in url:", "# return Document", "else", ":", "raise", "NotImplementedError", "(", "\"No support for catalog results of type %s\"", "%", "url", ")" ]
Return the model class matching a URL
[ "Return", "the", "model", "class", "matching", "a", "URL" ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/catalog.py#L40-L51
michaelaye/pyciss
pyciss/solitons.py
get_year_since_resonance
def get_year_since_resonance(ringcube): "Calculate the fraction of the year since moon swap." t0 = dt(2006, 1, 21) td = ringcube.imagetime - t0 return td.days / 365.25
python
def get_year_since_resonance(ringcube): "Calculate the fraction of the year since moon swap." t0 = dt(2006, 1, 21) td = ringcube.imagetime - t0 return td.days / 365.25
[ "def", "get_year_since_resonance", "(", "ringcube", ")", ":", "t0", "=", "dt", "(", "2006", ",", "1", ",", "21", ")", "td", "=", "ringcube", ".", "imagetime", "-", "t0", "return", "td", ".", "days", "/", "365.25" ]
Calculate the fraction of the year since moon swap.
[ "Calculate", "the", "fraction", "of", "the", "year", "since", "moon", "swap", "." ]
train
https://github.com/michaelaye/pyciss/blob/019256424466060babead7edab86736c881b0831/pyciss/solitons.py#L12-L16
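The fractional-year arithmetic in isolation, runnable without pyciss; the 2006-01-21 epoch is the one hard-coded above:

from datetime import datetime as dt

def years_since_resonance(imagetime):
    # Fraction of Julian years (365.25 days) since the moon swap.
    return (imagetime - dt(2006, 1, 21)).days / 365.25

print(years_since_resonance(dt(2010, 7, 21)))  # -> roughly 4.5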
michaelaye/pyciss
pyciss/solitons.py
create_polynoms
def create_polynoms(): """Create and return poly1d objects. Uses the parameters from Morgan to create poly1d objects for calculations. """ fname = pr.resource_filename('pyciss', 'data/soliton_prediction_parameters.csv') res_df = pd.read_csv(fname) polys = {} for resorder, row in zip('65 54 43 21'.split(), range(4)): p = poly1d([res_df.loc[row, 'Slope (km/yr)'], res_df.loc[row, 'Intercept (km)']]) polys['janus ' + ':'.join(resorder)] = p return polys
python
def create_polynoms(): """Create and return poly1d objects. Uses the parameters from Morgan to create poly1d objects for calculations. """ fname = pr.resource_filename('pyciss', 'data/soliton_prediction_parameters.csv') res_df = pd.read_csv(fname) polys = {} for resorder, row in zip('65 54 43 21'.split(), range(4)): p = poly1d([res_df.loc[row, 'Slope (km/yr)'], res_df.loc[row, 'Intercept (km)']]) polys['janus ' + ':'.join(resorder)] = p return polys
[ "def", "create_polynoms", "(", ")", ":", "fname", "=", "pr", ".", "resource_filename", "(", "'pyciss'", ",", "'data/soliton_prediction_parameters.csv'", ")", "res_df", "=", "pd", ".", "read_csv", "(", "fname", ")", "polys", "=", "{", "}", "for", "resorder", ",", "row", "in", "zip", "(", "'65 54 43 21'", ".", "split", "(", ")", ",", "range", "(", "4", ")", ")", ":", "p", "=", "poly1d", "(", "[", "res_df", ".", "loc", "[", "row", ",", "'Slope (km/yr)'", "]", ",", "res_df", ".", "loc", "[", "row", ",", "'Intercept (km)'", "]", "]", ")", "polys", "[", "'janus '", "+", "':'", ".", "join", "(", "resorder", ")", "]", "=", "p", "return", "polys" ]
Create and return poly1d objects. Uses the parameters from Morgan to create poly1d objects for calculations.
[ "Create", "and", "return", "poly1d", "objects", "." ]
train
https://github.com/michaelaye/pyciss/blob/019256424466060babead7edab86736c881b0831/pyciss/solitons.py#L19-L32
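How the returned predictors behave: each poly1d is a line radius(t) = slope * t + intercept in km versus elapsed years. The coefficients below are illustrative placeholders, not values from soliton_prediction_parameters.csv:

from numpy import poly1d

p = poly1d([24.0, 134300.0])   # placeholder slope (km/yr) and intercept (km)
print(p(4.5))                  # predicted radius after 4.5 years -> 134408.0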
michaelaye/pyciss
pyciss/solitons.py
check_for_soliton
def check_for_soliton(img_id): """Workhorse function. Creates the polynoms. Calculates radius constraints from attributes of the `RingCube` object built internally. Parameters ---------- img_id : str Image ID used to locate the ring-projected ISS image file; a `pyciss.ringcube.RingCube` (a container class for such files) is created internally. Returns ------- dict Dictionary with all solitons found. It is a dict because more than one soliton could appear in one image. """ pm = io.PathManager(img_id) try: ringcube = RingCube(pm.cubepath) except FileNotFoundError: ringcube = RingCube(pm.undestriped) polys = create_polynoms() minrad = ringcube.minrad.to(u.km) maxrad = ringcube.maxrad.to(u.km) delta_years = get_year_since_resonance(ringcube) soliton_radii = {} for k, p in polys.items(): current_r = p(delta_years) * u.km if minrad < current_r < maxrad: soliton_radii[k] = current_r return soliton_radii if soliton_radii else None
python
def check_for_soliton(img_id): """Workhorse function. Creates the polynoms. Calculates radius constraints from attributes of the `RingCube` object built internally. Parameters ---------- img_id : str Image ID used to locate the ring-projected ISS image file; a `pyciss.ringcube.RingCube` (a container class for such files) is created internally. Returns ------- dict Dictionary with all solitons found. It is a dict because more than one soliton could appear in one image. """ pm = io.PathManager(img_id) try: ringcube = RingCube(pm.cubepath) except FileNotFoundError: ringcube = RingCube(pm.undestriped) polys = create_polynoms() minrad = ringcube.minrad.to(u.km) maxrad = ringcube.maxrad.to(u.km) delta_years = get_year_since_resonance(ringcube) soliton_radii = {} for k, p in polys.items(): current_r = p(delta_years) * u.km if minrad < current_r < maxrad: soliton_radii[k] = current_r return soliton_radii if soliton_radii else None
[ "def", "check_for_soliton", "(", "img_id", ")", ":", "pm", "=", "io", ".", "PathManager", "(", "img_id", ")", "try", ":", "ringcube", "=", "RingCube", "(", "pm", ".", "cubepath", ")", "except", "FileNotFoundError", ":", "ringcube", "=", "RingCube", "(", "pm", ".", "undestriped", ")", "polys", "=", "create_polynoms", "(", ")", "minrad", "=", "ringcube", ".", "minrad", ".", "to", "(", "u", ".", "km", ")", "maxrad", "=", "ringcube", ".", "maxrad", ".", "to", "(", "u", ".", "km", ")", "delta_years", "=", "get_year_since_resonance", "(", "ringcube", ")", "soliton_radii", "=", "{", "}", "for", "k", ",", "p", "in", "polys", ".", "items", "(", ")", ":", "current_r", "=", "p", "(", "delta_years", ")", "*", "u", ".", "km", "if", "minrad", "<", "current_r", "<", "maxrad", ":", "soliton_radii", "[", "k", "]", "=", "current_r", "return", "soliton_radii", "if", "soliton_radii", "else", "None" ]
Workhorse function. Creates the polynoms. Calculates radius constraints from attributes of the `RingCube` object built internally. Parameters ---------- img_id : str Image ID used to locate the ring-projected ISS image file; a `pyciss.ringcube.RingCube` (a container class for such files) is created internally. Returns ------- dict Dictionary with all solitons found. It is a dict because more than one soliton could appear in one image.
[ "Workhorse", "function", "." ]
train
https://github.com/michaelaye/pyciss/blob/019256424466060babead7edab86736c881b0831/pyciss/solitons.py#L35-L66
koordinates/python-client
koordinates/client.py
Client.get_manager
def get_manager(self, model): """ Return the active manager for the given model. :param model: Model class to look up the manager instance for. :return: Manager instance for the model associated with this client. """ if isinstance(model, six.string_types): # undocumented string lookup for k, m in self._manager_map.items(): if k.__name__ == model: return m else: raise KeyError(model) return self._manager_map[model]
python
def get_manager(self, model): """ Return the active manager for the given model. :param model: Model class to look up the manager instance for. :return: Manager instance for the model associated with this client. """ if isinstance(model, six.string_types): # undocumented string lookup for k, m in self._manager_map.items(): if k.__name__ == model: return m else: raise KeyError(model) return self._manager_map[model]
[ "def", "get_manager", "(", "self", ",", "model", ")", ":", "if", "isinstance", "(", "model", ",", "six", ".", "string_types", ")", ":", "# undocumented string lookup", "for", "k", ",", "m", "in", "self", ".", "_manager_map", ".", "items", "(", ")", ":", "if", "k", ".", "__name__", "==", "model", ":", "return", "m", "else", ":", "raise", "KeyError", "(", "model", ")", "return", "self", ".", "_manager_map", "[", "model", "]" ]
Return the active manager for the given model. :param model: Model class to look up the manager instance for. :return: Manager instance for the model associated with this client.
[ "Return", "the", "active", "manager", "for", "the", "given", "model", ".", ":", "param", "model", ":", "Model", "class", "to", "look", "up", "the", "manager", "instance", "for", ".", ":", "return", ":", "Manager", "instance", "for", "the", "model", "associated", "with", "this", "client", "." ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/client.py#L102-L116
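Both lookup styles in one hedged sketch; the host and token are placeholders:

import koordinates
from koordinates import Layer

client = koordinates.Client(host='example.koordinates.com', token='MY_API_TOKEN')
assert client.get_manager(Layer) is client.get_manager('Layer')  # by class or by name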
koordinates/python-client
koordinates/client.py
Client._assemble_headers
def _assemble_headers(self, method, user_headers=None): """ Takes the supplied headers and adds in any which are defined at a client level and then returns the result. :param user_headers: a `dict` containing headers defined at the request level, optional. :return: a `dict` instance """ headers = copy.deepcopy(user_headers or {}) if method not in ('GET', 'HEAD'): headers.setdefault('Content-Type', 'application/json') return headers
python
def _assemble_headers(self, method, user_headers=None): """ Takes the supplied headers and adds in any which are defined at a client level and then returns the result. :param user_headers: a `dict` containing headers defined at the request level, optional. :return: a `dict` instance """ headers = copy.deepcopy(user_headers or {}) if method not in ('GET', 'HEAD'): headers.setdefault('Content-Type', 'application/json') return headers
[ "def", "_assemble_headers", "(", "self", ",", "method", ",", "user_headers", "=", "None", ")", ":", "headers", "=", "copy", ".", "deepcopy", "(", "user_headers", "or", "{", "}", ")", "if", "method", "not", "in", "(", "'GET'", ",", "'HEAD'", ")", ":", "headers", ".", "setdefault", "(", "'Content-Type'", ",", "'application/json'", ")", "return", "headers" ]
Takes the supplied headers and adds in any which are defined at a client level and then returns the result. :param user_headers: a `dict` containing headers defined at the request level, optional. :return: a `dict` instance
[ "Takes", "the", "supplied", "headers", "and", "adds", "in", "any", "which", "are", "defined", "at", "a", "client", "level", "and", "then", "returns", "the", "result", "." ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/client.py#L119-L136
koordinates/python-client
koordinates/client.py
Client.reverse_url
def reverse_url(self, datatype, url, verb='GET', urltype='single', api_version=None): """ Extracts parameters from a populated URL :param datatype: a string identifying the data the url accesses. :param url: the fully-qualified URL to extract parameters from. :param verb: the HTTP verb needed for use with the url. :param urltype: an adjective describing the nature of the request. :return: dict """ api_version = api_version or 'v1' templates = getattr(self, 'URL_TEMPLATES__%s' % api_version) # this is fairly simplistic, if necessary we could use the parse lib template_url = r"https://(?P<api_host>.+)/services/api/(?P<api_version>.+)" template_url += re.sub(r'{([^}]+)}', r'(?P<\1>.+)', templates[datatype][verb][urltype]) # /foo/{foo_id}/bar/{id}/ m = re.match(template_url, url or '') if not m: raise KeyError("No reverse match from '%s' to %s.%s.%s" % (url, datatype, verb, urltype)) r = m.groupdict() del r['api_host'] if r.pop('api_version') != api_version: raise ValueError("API version mismatch") return r
python
def reverse_url(self, datatype, url, verb='GET', urltype='single', api_version=None): """ Extracts parameters from a populated URL :param datatype: a string identifying the data the url accesses. :param url: the fully-qualified URL to extract parameters from. :param verb: the HTTP verb needed for use with the url. :param urltype: an adjective describing the nature of the request. :return: dict """ api_version = api_version or 'v1' templates = getattr(self, 'URL_TEMPLATES__%s' % api_version) # this is fairly simplistic, if necessary we could use the parse lib template_url = r"https://(?P<api_host>.+)/services/api/(?P<api_version>.+)" template_url += re.sub(r'{([^}]+)}', r'(?P<\1>.+)', templates[datatype][verb][urltype]) # /foo/{foo_id}/bar/{id}/ m = re.match(template_url, url or '') if not m: raise KeyError("No reverse match from '%s' to %s.%s.%s" % (url, datatype, verb, urltype)) r = m.groupdict() del r['api_host'] if r.pop('api_version') != api_version: raise ValueError("API version mismatch") return r
[ "def", "reverse_url", "(", "self", ",", "datatype", ",", "url", ",", "verb", "=", "'GET'", ",", "urltype", "=", "'single'", ",", "api_version", "=", "None", ")", ":", "api_version", "=", "api_version", "or", "'v1'", "templates", "=", "getattr", "(", "self", ",", "'URL_TEMPLATES__%s'", "%", "api_version", ")", "# this is fairly simplistic, if necessary we could use the parse lib", "template_url", "=", "r\"https://(?P<api_host>.+)/services/api/(?P<api_version>.+)\"", "template_url", "+=", "re", ".", "sub", "(", "r'{([^}]+)}'", ",", "r'(?P<\\1>.+)'", ",", "templates", "[", "datatype", "]", "[", "verb", "]", "[", "urltype", "]", ")", "# /foo/{foo_id}/bar/{id}/", "m", "=", "re", ".", "match", "(", "template_url", ",", "url", "or", "''", ")", "if", "not", "m", ":", "raise", "KeyError", "(", "\"No reverse match from '%s' to %s.%s.%s\"", "%", "(", "url", ",", "datatype", ",", "verb", ",", "urltype", ")", ")", "r", "=", "m", ".", "groupdict", "(", ")", "del", "r", "[", "'api_host'", "]", "if", "r", ".", "pop", "(", "'api_version'", ")", "!=", "api_version", ":", "raise", "ValueError", "(", "\"API version mismatch\"", ")", "return", "r" ]
Extracts parameters from a populated URL :param datatype: a string identifying the data the url accesses. :param url: the fully-qualified URL to extract parameters from. :param verb: the HTTP verb needed for use with the url. :param urltype: an adjective describing the nature of the request. :return: dict
[ "Extracts", "parameters", "from", "a", "populated", "URL" ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/client.py#L185-L210
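The template-to-regex trick in isolation: {name} placeholders become named groups, and re.match recovers the parameters. Standalone and runnable (the template below is made up):

import re

template = 'layers/{layer_id}/versions/{version_id}/'
pattern = re.sub(r'{([^}]+)}', r'(?P<\1>.+)', template)
m = re.match(pattern, 'layers/123/versions/4567/')
print(m.groupdict())   # {'layer_id': '123', 'version_id': '4567'}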
koordinates/python-client
koordinates/client.py
Client.get_url
def get_url(self, datatype, verb, urltype, params={}, api_host=None, api_version=None): """Returns a fully formed url :param datatype: a string identifying the data the url will access. :param verb: the HTTP verb needed for use with the url. :param urltype: an adjective describing the nature of the request. :param params: substitution variables for the URL. :return: a fully formed url. :rtype: str """ api_version = api_version or 'v1' api_host = api_host or self.host subst = params.copy() subst['api_host'] = api_host subst['api_version'] = api_version url = "https://{api_host}/services/api/{api_version}" url += self.get_url_path(datatype, verb, urltype, params, api_version) return url.format(**subst)
python
def get_url(self, datatype, verb, urltype, params={}, api_host=None, api_version=None): """Returns a fully formed url :param datatype: a string identifying the data the url will access. :param verb: the HTTP verb needed for use with the url. :param urltype: an adjective describing the nature of the request. :param params: substitution variables for the URL. :return: a fully formed url. :rtype: str """ api_version = api_version or 'v1' api_host = api_host or self.host subst = params.copy() subst['api_host'] = api_host subst['api_version'] = api_version url = "https://{api_host}/services/api/{api_version}" url += self.get_url_path(datatype, verb, urltype, params, api_version) return url.format(**subst)
[ "def", "get_url", "(", "self", ",", "datatype", ",", "verb", ",", "urltype", ",", "params", "=", "{", "}", ",", "api_host", "=", "None", ",", "api_version", "=", "None", ")", ":", "api_version", "=", "api_version", "or", "'v1'", "api_host", "=", "api_host", "or", "self", ".", "host", "subst", "=", "params", ".", "copy", "(", ")", "subst", "[", "'api_host'", "]", "=", "api_host", "subst", "[", "'api_version'", "]", "=", "api_version", "url", "=", "\"https://{api_host}/services/api/{api_version}\"", "url", "+=", "self", ".", "get_url_path", "(", "datatype", ",", "verb", ",", "urltype", ",", "params", ",", "api_version", ")", "return", "url", ".", "format", "(", "*", "*", "subst", ")" ]
Returns a fully formed url :param datatype: a string identifying the data the url will access. :param verb: the HTTP verb needed for use with the url. :param urltype: an adjective describing the nature of the request. :param params: substitution variables for the URL. :return: a fully formed url. :rtype: str
[ "Returns", "a", "fully", "formed", "url" ]
train
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/client.py#L212-L231
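The substitution itself is plain str.format over the merged params; the layer path used here is an assumed template, not quoted from the client:

# Illustrative only: mirrors get_url()'s format()-based substitution.
subst = {'api_host': 'example.koordinates.com', 'api_version': 'v1', 'id': 123}
url = 'https://{api_host}/services/api/{api_version}/layers/{id}/'.format(**subst)
print(url)   # https://example.koordinates.com/services/api/v1/layers/123/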
Unidata/siphon
siphon/cdmr/xarray_support.py
CDMRemoteStore.open_store_variable
def open_store_variable(self, name, var): """Turn CDMRemote variable into something like a numpy.ndarray.""" data = indexing.LazilyOuterIndexedArray(CDMArrayWrapper(name, self)) return Variable(var.dimensions, data, {a: getattr(var, a) for a in var.ncattrs()})
python
def open_store_variable(self, name, var): """Turn CDMRemote variable into something like a numpy.ndarray.""" data = indexing.LazilyOuterIndexedArray(CDMArrayWrapper(name, self)) return Variable(var.dimensions, data, {a: getattr(var, a) for a in var.ncattrs()})
[ "def", "open_store_variable", "(", "self", ",", "name", ",", "var", ")", ":", "data", "=", "indexing", ".", "LazilyOuterIndexedArray", "(", "CDMArrayWrapper", "(", "name", ",", "self", ")", ")", "return", "Variable", "(", "var", ".", "dimensions", ",", "data", ",", "{", "a", ":", "getattr", "(", "var", ",", "a", ")", "for", "a", "in", "var", ".", "ncattrs", "(", ")", "}", ")" ]
Turn CDMRemote variable into something like a numpy.ndarray.
[ "Turn", "CDMRemote", "variable", "into", "something", "like", "a", "numpy", ".", "ndarray", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/xarray_support.py#L52-L55
Unidata/siphon
siphon/cdmr/xarray_support.py
CDMRemoteStore.get_attrs
def get_attrs(self): """Get the global attributes from underlying data set.""" return FrozenOrderedDict((a, getattr(self.ds, a)) for a in self.ds.ncattrs())
python
def get_attrs(self): """Get the global attributes from underlying data set.""" return FrozenOrderedDict((a, getattr(self.ds, a)) for a in self.ds.ncattrs())
[ "def", "get_attrs", "(", "self", ")", ":", "return", "FrozenOrderedDict", "(", "(", "a", ",", "getattr", "(", "self", ".", "ds", ",", "a", ")", ")", "for", "a", "in", "self", ".", "ds", ".", "ncattrs", "(", ")", ")" ]
Get the global attributes from underlying data set.
[ "Get", "the", "global", "attributes", "from", "underlying", "data", "set", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/xarray_support.py#L62-L64
Unidata/siphon
siphon/cdmr/xarray_support.py
CDMRemoteStore.get_dimensions
def get_dimensions(self): """Get the dimensions from underlying data set.""" return FrozenOrderedDict((k, len(v)) for k, v in self.ds.dimensions.items())
python
def get_dimensions(self): """Get the dimensions from underlying data set.""" return FrozenOrderedDict((k, len(v)) for k, v in self.ds.dimensions.items())
[ "def", "get_dimensions", "(", "self", ")", ":", "return", "FrozenOrderedDict", "(", "(", "k", ",", "len", "(", "v", ")", ")", "for", "k", ",", "v", "in", "self", ".", "ds", ".", "dimensions", ".", "items", "(", ")", ")" ]
Get the dimensions from underlying data set.
[ "Get", "the", "dimensions", "from", "underlying", "data", "set", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/xarray_support.py#L66-L68
Unidata/siphon
siphon/catalog.py
_find_base_tds_url
def _find_base_tds_url(catalog_url): """Identify the base URL of the THREDDS server from the catalog URL. Will retain URL scheme, host, port and username/password when present. """ url_components = urlparse(catalog_url) if url_components.path: return catalog_url.split(url_components.path)[0] else: return catalog_url
python
def _find_base_tds_url(catalog_url): """Identify the base URL of the THREDDS server from the catalog URL. Will retain URL scheme, host, port and username/password when present. """ url_components = urlparse(catalog_url) if url_components.path: return catalog_url.split(url_components.path)[0] else: return catalog_url
[ "def", "_find_base_tds_url", "(", "catalog_url", ")", ":", "url_components", "=", "urlparse", "(", "catalog_url", ")", "if", "url_components", ".", "path", ":", "return", "catalog_url", ".", "split", "(", "url_components", ".", "path", ")", "[", "0", "]", "else", ":", "return", "catalog_url" ]
Identify the base URL of the THREDDS server from the catalog URL. Will retain URL scheme, host, port and username/password when present.
[ "Identify", "the", "base", "URL", "of", "the", "THREDDS", "server", "from", "the", "catalog", "URL", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/catalog.py#L790-L799
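The path-stripping behaviour, runnable standalone:

from urllib.parse import urlparse

def base_tds_url(catalog_url):
    # Split on the URL path; everything before it is the server base.
    path = urlparse(catalog_url).path
    return catalog_url.split(path)[0] if path else catalog_url

print(base_tds_url('https://thredds.ucar.edu/thredds/catalog/catalog.xml'))
# -> https://thredds.ucar.edu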
Unidata/siphon
siphon/catalog.py
DatasetCollection.filter_time_nearest
def filter_time_nearest(self, time, regex=None): """Filter keys for an item closest to the desired time. Loops over all keys in the collection and uses `regex` to extract and build `datetime`s. The collection of `datetime`s is compared to `time` and the value that has a `datetime` closest to that requested is returned. If none of the keys in the collection match the regex, indicating that the keys are not date/time-based, a ``ValueError`` is raised. Parameters ---------- time : ``datetime.datetime`` The desired time regex : str, optional The regular expression to use to extract date/time information from the key. If given, this should contain named groups: 'year', 'month', 'day', 'hour', 'minute', 'second', and 'microsecond', as appropriate. When a match is found, any of those groups missing from the pattern will be assigned a value of 0. The default pattern looks for patterns like: 20171118_2356. Returns ------- The value with a time closest to that desired """ return min(self._get_datasets_with_times(regex), key=lambda i: abs((i[0] - time).total_seconds()))[-1]
python
def filter_time_nearest(self, time, regex=None): """Filter keys for an item closest to the desired time. Loops over all keys in the collection and uses `regex` to extract and build `datetime`s. The collection of `datetime`s is compared to `time` and the value that has a `datetime` closest to that requested is returned. If none of the keys in the collection match the regex, indicating that the keys are not date/time-based, a ``ValueError`` is raised. Parameters ---------- time : ``datetime.datetime`` The desired time regex : str, optional The regular expression to use to extract date/time information from the key. If given, this should contain named groups: 'year', 'month', 'day', 'hour', 'minute', 'second', and 'microsecond', as appropriate. When a match is found, any of those groups missing from the pattern will be assigned a value of 0. The default pattern looks for patterns like: 20171118_2356. Returns ------- The value with a time closest to that desired """ return min(self._get_datasets_with_times(regex), key=lambda i: abs((i[0] - time).total_seconds()))[-1]
[ "def", "filter_time_nearest", "(", "self", ",", "time", ",", "regex", "=", "None", ")", ":", "return", "min", "(", "self", ".", "_get_datasets_with_times", "(", "regex", ")", ",", "key", "=", "lambda", "i", ":", "abs", "(", "(", "i", "[", "0", "]", "-", "time", ")", ".", "total_seconds", "(", ")", ")", ")", "[", "-", "1", "]" ]
Filter keys for an item closest to the desired time. Loops over all keys in the collection and uses `regex` to extract and build `datetime`s. The collection of `datetime`s is compared to `time` and the value that has a `datetime` closest to that requested is returned. If none of the keys in the collection match the regex, indicating that the keys are not date/time-based, a ``ValueError`` is raised. Parameters ---------- time : ``datetime.datetime`` The desired time regex : str, optional The regular expression to use to extract date/time information from the key. If given, this should contain named groups: 'year', 'month', 'day', 'hour', 'minute', 'second', and 'microsecond', as appropriate. When a match is found, any of those groups missing from the pattern will be assigned a value of 0. The default pattern looks for patterns like: 20171118_2356. Returns ------- The value with a time closest to that desired
[ "Filter", "keys", "for", "an", "item", "closest", "to", "the", "desired", "time", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/catalog.py#L74-L100
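The nearest-key selection in isolation, with default-style YYYYMMDD_HHMM keys parsed by hand (the file names are made up):

import re
from datetime import datetime

keys = ['obs_20171118_2356.nc', 'obs_20171119_0000.nc', 'obs_20171119_0600.nc']

def parse(key):
    # Extract date and time digits and build a datetime.
    m = re.search(r'(\d{8})_(\d{4})', key)
    return datetime.strptime(''.join(m.groups()), '%Y%m%d%H%M')

target = datetime(2017, 11, 19, 0, 20)
print(min(keys, key=lambda k: abs((parse(k) - target).total_seconds())))
# -> obs_20171119_0000.nc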
Unidata/siphon
siphon/catalog.py
DatasetCollection.filter_time_range
def filter_time_range(self, start, end, regex=None): """Filter keys for all items within the desired time range. Loops over all keys in the collection and uses `regex` to extract and build `datetime`s. From the collection of `datetime`s, all values within `start` and `end` (inclusive) are returned. If none of the keys in the collection match the regex, indicating that the keys are not date/time-based, a ``ValueError`` is raised. Parameters ---------- start : ``datetime.datetime`` The start of the desired time range, inclusive end : ``datetime.datetime`` The end of the desired time range, inclusive regex : str, optional The regular expression to use to extract date/time information from the key. If given, this should contain named groups: 'year', 'month', 'day', 'hour', 'minute', 'second', and 'microsecond', as appropriate. When a match is found, any of those groups missing from the pattern will be assigned a value of 0. The default pattern looks for patterns like: 20171118_2356. Returns ------- All values corresponding to times within the specified range """ return [item[-1] for item in self._get_datasets_with_times(regex) if start <= item[0] <= end]
python
def filter_time_range(self, start, end, regex=None): """Filter keys for all items within the desired time range. Loops over all keys in the collection and uses `regex` to extract and build `datetime`s. From the collection of `datetime`s, all values within `start` and `end` (inclusive) are returned. If none of the keys in the collection match the regex, indicating that the keys are not date/time-based, a ``ValueError`` is raised. Parameters ---------- start : ``datetime.datetime`` The start of the desired time range, inclusive end : ``datetime.datetime`` The end of the desired time range, inclusive regex : str, optional The regular expression to use to extract date/time information from the key. If given, this should contain named groups: 'year', 'month', 'day', 'hour', 'minute', 'second', and 'microsecond', as appropriate. When a match is found, any of those groups missing from the pattern will be assigned a value of 0. The default pattern looks for patterns like: 20171118_2356. Returns ------- All values corresponding to times within the specified range """ return [item[-1] for item in self._get_datasets_with_times(regex) if start <= item[0] <= end]
[ "def", "filter_time_range", "(", "self", ",", "start", ",", "end", ",", "regex", "=", "None", ")", ":", "return", "[", "item", "[", "-", "1", "]", "for", "item", "in", "self", ".", "_get_datasets_with_times", "(", "regex", ")", "if", "start", "<=", "item", "[", "0", "]", "<=", "end", "]" ]
Filter keys for all items within the desired time range. Loops over all keys in the collection and uses `regex` to extract and build `datetime`s. From the collection of `datetime`s, all values within `start` and `end` (inclusive) are returned. If none of the keys in the collection match the regex, indicating that the keys are not date/time-based, a ``ValueError`` is raised. Parameters ---------- start : ``datetime.datetime`` The start of the desired time range, inclusive end : ``datetime.datetime`` The end of the desired time range, inclusive regex : str, optional The regular expression to use to extract date/time information from the key. If given, this should contain named groups: 'year', 'month', 'day', 'hour', 'minute', 'second', and 'microsecond', as appropriate. When a match is found, any of those groups missing from the pattern will be assigned a value of 0. The default pattern looks for patterns like: 20171118_2356. Returns ------- All values corresponding to times within the specified range
[ "Filter", "keys", "for", "all", "items", "within", "the", "desired", "time", "range", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/catalog.py#L102-L129
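And the inclusive range filter, reduced to its core (keys and times are made up):

from datetime import datetime

times = {
    'obs_20171118_2356.nc': datetime(2017, 11, 18, 23, 56),
    'obs_20171119_0000.nc': datetime(2017, 11, 19, 0, 0),
    'obs_20171119_0600.nc': datetime(2017, 11, 19, 6, 0),
}
start, end = datetime(2017, 11, 18, 23, 0), datetime(2017, 11, 19, 1, 0)
print([k for k, t in times.items() if start <= t <= end])
# -> ['obs_20171118_2356.nc', 'obs_20171119_0000.nc']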
Unidata/siphon
siphon/catalog.py
CaseInsensitiveDict.pop
def pop(self, key, *args, **kwargs): """Remove and return the value associated with case-insensitive ``key``.""" return super(CaseInsensitiveDict, self).pop(CaseInsensitiveStr(key), *args, **kwargs)
python
def pop(self, key, *args, **kwargs): """Remove and return the value associated with case-insensitive ``key``.""" return super(CaseInsensitiveDict, self).pop(CaseInsensitiveStr(key), *args, **kwargs)
[ "def", "pop", "(", "self", ",", "key", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "super", "(", "CaseInsensitiveDict", ",", "self", ")", ".", "pop", "(", "CaseInsensitiveStr", "(", "key", ")", ")" ]
Remove and return the value associated with case-insensitive ``key``.
[ "Remove", "and", "return", "the", "value", "associated", "with", "case", "-", "insensitive", "key", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/catalog.py#L210-L212
Unidata/siphon
siphon/catalog.py
CaseInsensitiveDict._keys_to_lower
def _keys_to_lower(self): """Convert key set to lowercase.""" for k in list(self.keys()): val = super(CaseInsensitiveDict, self).__getitem__(k) super(CaseInsensitiveDict, self).__delitem__(k) self.__setitem__(CaseInsensitiveStr(k), val)
python
def _keys_to_lower(self): """Convert key set to lowercase.""" for k in list(self.keys()): val = super(CaseInsensitiveDict, self).__getitem__(k) super(CaseInsensitiveDict, self).__delitem__(k) self.__setitem__(CaseInsensitiveStr(k), val)
[ "def", "_keys_to_lower", "(", "self", ")", ":", "for", "k", "in", "list", "(", "self", ".", "keys", "(", ")", ")", ":", "val", "=", "super", "(", "CaseInsensitiveDict", ",", "self", ")", ".", "__getitem__", "(", "k", ")", "super", "(", "CaseInsensitiveDict", ",", "self", ")", ".", "__delitem__", "(", "k", ")", "self", ".", "__setitem__", "(", "CaseInsensitiveStr", "(", "k", ")", ",", "val", ")" ]
Convert key set to lowercase.
[ "Convert", "key", "set", "to", "lowercase", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/catalog.py#L214-L219
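A minimal sketch of the case-insensitive string these helpers rely on; the real CaseInsensitiveStr may differ in detail, but the idea is to delegate equality and hashing to the lower-cased value:

class CIStr(str):
    """str that compares and hashes case-insensitively."""
    def __eq__(self, other):
        return self.lower() == str(other).lower()
    def __hash__(self):
        return hash(self.lower())

d = {CIStr('OPENDAP'): 'url'}
print(d['opendap'])   # -> url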
Unidata/siphon
siphon/catalog.py
Dataset.resolve_url
def resolve_url(self, catalog_url): """Resolve the url of the dataset when reading latest.xml. Parameters ---------- catalog_url : str The catalog url to be resolved """ if catalog_url != '': resolver_base = catalog_url.split('catalog.xml')[0] resolver_url = resolver_base + self.url_path resolver_xml = session_manager.urlopen(resolver_url) tree = ET.parse(resolver_xml) root = tree.getroot() if 'name' in root.attrib: self.catalog_name = root.attrib['name'] else: self.catalog_name = 'No name found' resolved_url = '' found = False for child in root.iter(): if not found: tag_type = child.tag.split('}')[-1] if tag_type == 'dataset': if 'urlPath' in child.attrib: ds = Dataset(child) resolved_url = ds.url_path found = True if found: return resolved_url else: log.warning('no dataset url path found in latest.xml!')
python
def resolve_url(self, catalog_url): """Resolve the url of the dataset when reading latest.xml. Parameters ---------- catalog_url : str The catalog url to be resolved """ if catalog_url != '': resolver_base = catalog_url.split('catalog.xml')[0] resolver_url = resolver_base + self.url_path resolver_xml = session_manager.urlopen(resolver_url) tree = ET.parse(resolver_xml) root = tree.getroot() if 'name' in root.attrib: self.catalog_name = root.attrib['name'] else: self.catalog_name = 'No name found' resolved_url = '' found = False for child in root.iter(): if not found: tag_type = child.tag.split('}')[-1] if tag_type == 'dataset': if 'urlPath' in child.attrib: ds = Dataset(child) resolved_url = ds.url_path found = True if found: return resolved_url else: log.warning('no dataset url path found in latest.xml!')
[ "def", "resolve_url", "(", "self", ",", "catalog_url", ")", ":", "if", "catalog_url", "!=", "''", ":", "resolver_base", "=", "catalog_url", ".", "split", "(", "'catalog.xml'", ")", "[", "0", "]", "resolver_url", "=", "resolver_base", "+", "self", ".", "url_path", "resolver_xml", "=", "session_manager", ".", "urlopen", "(", "resolver_url", ")", "tree", "=", "ET", ".", "parse", "(", "resolver_xml", ")", "root", "=", "tree", ".", "getroot", "(", ")", "if", "'name'", "in", "root", ".", "attrib", ":", "self", ".", "catalog_name", "=", "root", ".", "attrib", "[", "'name'", "]", "else", ":", "self", ".", "catalog_name", "=", "'No name found'", "resolved_url", "=", "''", "found", "=", "False", "for", "child", "in", "root", ".", "iter", "(", ")", ":", "if", "not", "found", ":", "tag_type", "=", "child", ".", "tag", ".", "split", "(", "'}'", ")", "[", "-", "1", "]", "if", "tag_type", "==", "'dataset'", ":", "if", "'urlPath'", "in", "child", ".", "attrib", ":", "ds", "=", "Dataset", "(", "child", ")", "resolved_url", "=", "ds", ".", "url_path", "found", "=", "True", "if", "found", ":", "return", "resolved_url", "else", ":", "log", ".", "warning", "(", "'no dataset url path found in latest.xml!'", ")" ]
Resolve the url of the dataset when reading latest.xml. Parameters ---------- catalog_url : str The catalog url to be resolved
[ "Resolve", "the", "url", "of", "the", "dataset", "when", "reading", "latest", ".", "xml", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/catalog.py#L477-L509
Unidata/siphon
siphon/catalog.py
Dataset.make_access_urls
def make_access_urls(self, catalog_url, all_services, metadata=None): """Make fully qualified urls for the access methods enabled on the dataset. Parameters ---------- catalog_url : str The top level server url all_services : List[SimpleService] list of :class:`SimpleService` objects associated with the dataset metadata : dict Metadata from the :class:`TDSCatalog` """ all_service_dict = CaseInsensitiveDict({}) for service in all_services: all_service_dict[service.name] = service if isinstance(service, CompoundService): for subservice in service.services: all_service_dict[subservice.name] = subservice service_name = metadata.get('serviceName', None) access_urls = CaseInsensitiveDict({}) server_url = _find_base_tds_url(catalog_url) # process access urls for datasets that reference top # level catalog services (individual or compound service # types). if service_name in all_service_dict: service = all_service_dict[service_name] if service.service_type != 'Resolver': # if service is a CompoundService, create access url # for each SimpleService if isinstance(service, CompoundService): for subservice in service.services: server_base = urljoin(server_url, subservice.base) access_urls[subservice.service_type] = urljoin(server_base, self.url_path) else: server_base = urljoin(server_url, service.base) access_urls[service.service_type] = urljoin(server_base, self.url_path) # process access children of dataset elements for service_type in self.access_element_info: url_path = self.access_element_info[service_type] if service_type in all_service_dict: server_base = urljoin(server_url, all_service_dict[service_type].base) access_urls[service_type] = urljoin(server_base, url_path) self.access_urls = access_urls
python
def make_access_urls(self, catalog_url, all_services, metadata=None): """Make fully qualified urls for the access methods enabled on the dataset. Parameters ---------- catalog_url : str The top level server url all_services : List[SimpleService] list of :class:`SimpleService` objects associated with the dataset metadata : dict Metadata from the :class:`TDSCatalog` """ all_service_dict = CaseInsensitiveDict({}) for service in all_services: all_service_dict[service.name] = service if isinstance(service, CompoundService): for subservice in service.services: all_service_dict[subservice.name] = subservice service_name = metadata.get('serviceName', None) access_urls = CaseInsensitiveDict({}) server_url = _find_base_tds_url(catalog_url) # process access urls for datasets that reference top # level catalog services (individual or compound service # types). if service_name in all_service_dict: service = all_service_dict[service_name] if service.service_type != 'Resolver': # if service is a CompoundService, create access url # for each SimpleService if isinstance(service, CompoundService): for subservice in service.services: server_base = urljoin(server_url, subservice.base) access_urls[subservice.service_type] = urljoin(server_base, self.url_path) else: server_base = urljoin(server_url, service.base) access_urls[service.service_type] = urljoin(server_base, self.url_path) # process access children of dataset elements for service_type in self.access_element_info: url_path = self.access_element_info[service_type] if service_type in all_service_dict: server_base = urljoin(server_url, all_service_dict[service_type].base) access_urls[service_type] = urljoin(server_base, url_path) self.access_urls = access_urls
[ "def", "make_access_urls", "(", "self", ",", "catalog_url", ",", "all_services", ",", "metadata", "=", "None", ")", ":", "all_service_dict", "=", "CaseInsensitiveDict", "(", "{", "}", ")", "for", "service", "in", "all_services", ":", "all_service_dict", "[", "service", ".", "name", "]", "=", "service", "if", "isinstance", "(", "service", ",", "CompoundService", ")", ":", "for", "subservice", "in", "service", ".", "services", ":", "all_service_dict", "[", "subservice", ".", "name", "]", "=", "subservice", "service_name", "=", "metadata", ".", "get", "(", "'serviceName'", ",", "None", ")", "access_urls", "=", "CaseInsensitiveDict", "(", "{", "}", ")", "server_url", "=", "_find_base_tds_url", "(", "catalog_url", ")", "# process access urls for datasets that reference top", "# level catalog services (individual or compound service", "# types).", "if", "service_name", "in", "all_service_dict", ":", "service", "=", "all_service_dict", "[", "service_name", "]", "if", "service", ".", "service_type", "!=", "'Resolver'", ":", "# if service is a CompoundService, create access url", "# for each SimpleService", "if", "isinstance", "(", "service", ",", "CompoundService", ")", ":", "for", "subservice", "in", "service", ".", "services", ":", "server_base", "=", "urljoin", "(", "server_url", ",", "subservice", ".", "base", ")", "access_urls", "[", "subservice", ".", "service_type", "]", "=", "urljoin", "(", "server_base", ",", "self", ".", "url_path", ")", "else", ":", "server_base", "=", "urljoin", "(", "server_url", ",", "service", ".", "base", ")", "access_urls", "[", "service", ".", "service_type", "]", "=", "urljoin", "(", "server_base", ",", "self", ".", "url_path", ")", "# process access children of dataset elements", "for", "service_type", "in", "self", ".", "access_element_info", ":", "url_path", "=", "self", ".", "access_element_info", "[", "service_type", "]", "if", "service_type", "in", "all_service_dict", ":", "server_base", "=", "urljoin", "(", "server_url", ",", "all_service_dict", "[", "service_type", "]", ".", "base", ")", "access_urls", "[", "service_type", "]", "=", "urljoin", "(", "server_base", ",", "url_path", ")", "self", ".", "access_urls", "=", "access_urls" ]
Make fully qualified urls for the access methods enabled on the dataset. Parameters ---------- catalog_url : str The top level server url all_services : List[SimpleService] list of :class:`SimpleService` objects associated with the dataset metadata : dict Metadata from the :class:`TDSCatalog`
[ "Make", "fully", "qualified", "urls", "for", "the", "access", "methods", "enabled", "on", "the", "dataset", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/catalog.py#L511-L560
Unidata/siphon
siphon/catalog.py
Dataset.add_access_element_info
def add_access_element_info(self, access_element): """Create an access method from a catalog element.""" service_name = access_element.attrib['serviceName'] url_path = access_element.attrib['urlPath'] self.access_element_info[service_name] = url_path
python
def add_access_element_info(self, access_element): """Create an access method from a catalog element.""" service_name = access_element.attrib['serviceName'] url_path = access_element.attrib['urlPath'] self.access_element_info[service_name] = url_path
[ "def", "add_access_element_info", "(", "self", ",", "access_element", ")", ":", "service_name", "=", "access_element", ".", "attrib", "[", "'serviceName'", "]", "url_path", "=", "access_element", ".", "attrib", "[", "'urlPath'", "]", "self", ".", "access_element_info", "[", "service_name", "]", "=", "url_path" ]
Create an access method from a catalog element.
[ "Create", "an", "access", "method", "from", "a", "catalog", "element", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/catalog.py#L562-L566
Unidata/siphon
siphon/catalog.py
Dataset.download
def download(self, filename=None): """Download the dataset to a local file. Parameters ---------- filename : str, optional The full path to which the dataset will be saved """ if filename is None: filename = self.name with self.remote_open() as infile: with open(filename, 'wb') as outfile: outfile.write(infile.read())
python
def download(self, filename=None): """Download the dataset to a local file. Parameters ---------- filename : str, optional The full path to which the dataset will be saved """ if filename is None: filename = self.name with self.remote_open() as infile: with open(filename, 'wb') as outfile: outfile.write(infile.read())
[ "def", "download", "(", "self", ",", "filename", "=", "None", ")", ":", "if", "filename", "is", "None", ":", "filename", "=", "self", ".", "name", "with", "self", ".", "remote_open", "(", ")", "as", "infile", ":", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "outfile", ":", "outfile", ".", "write", "(", "infile", ".", "read", "(", ")", ")" ]
Download the dataset to a local file. Parameters ---------- filename : str, optional The full path to which the dataset will be saved
[ "Download", "the", "dataset", "to", "a", "local", "file", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/catalog.py#L568-L581
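Typical use through a catalog; the THREDDS URL below is a placeholder, substitute a real catalog:

from siphon.catalog import TDSCatalog

cat = TDSCatalog('https://thredds.example.com/thredds/catalog/some/path/catalog.xml')
ds = cat.datasets[0]   # pick a dataset from the collection
ds.download()          # saves under the dataset's own name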
Unidata/siphon
siphon/catalog.py
Dataset.remote_access
def remote_access(self, service=None, use_xarray=None): """Access the remote dataset. Open the remote dataset and get a netCDF4-compatible `Dataset` object providing index-based subsetting capabilities. Parameters ---------- service : str, optional The name of the service to use for access to the dataset, either 'CdmRemote' or 'OPENDAP'. Defaults to 'CdmRemote' if that service is available, otherwise 'OPENDAP'. use_xarray : bool, optional If True, open the dataset with xarray and return an `xarray.Dataset` instead. Returns ------- Dataset Object for netCDF4-like access to the dataset """ if service is None: service = 'CdmRemote' if 'CdmRemote' in self.access_urls else 'OPENDAP' if service not in (CaseInsensitiveStr('CdmRemote'), CaseInsensitiveStr('OPENDAP')): raise ValueError(service + ' is not a valid service for remote_access') return self.access_with_service(service, use_xarray)
python
def remote_access(self, service=None, use_xarray=None): """Access the remote dataset. Open the remote dataset and get a netCDF4-compatible `Dataset` object providing index-based subsetting capabilities. Parameters ---------- service : str, optional The name of the service to use for access to the dataset, either 'CdmRemote' or 'OPENDAP'. Defaults to 'CdmRemote' if that service is available, otherwise 'OPENDAP'. use_xarray : bool, optional If True, open the dataset with xarray and return an `xarray.Dataset` instead. Returns ------- Dataset Object for netCDF4-like access to the dataset """ if service is None: service = 'CdmRemote' if 'CdmRemote' in self.access_urls else 'OPENDAP' if service not in (CaseInsensitiveStr('CdmRemote'), CaseInsensitiveStr('OPENDAP')): raise ValueError(service + ' is not a valid service for remote_access') return self.access_with_service(service, use_xarray)
[ "def", "remote_access", "(", "self", ",", "service", "=", "None", ",", "use_xarray", "=", "None", ")", ":", "if", "service", "is", "None", ":", "service", "=", "'CdmRemote'", "if", "'CdmRemote'", "in", "self", ".", "access_urls", "else", "'OPENDAP'", "if", "service", "not", "in", "(", "CaseInsensitiveStr", "(", "'CdmRemote'", ")", ",", "CaseInsensitiveStr", "(", "'OPENDAP'", ")", ")", ":", "raise", "ValueError", "(", "service", "+", "' is not a valid service for remote_access'", ")", "return", "self", ".", "access_with_service", "(", "service", ",", "use_xarray", ")" ]
Access the remote dataset. Open the remote dataset and get a netCDF4-compatible `Dataset` object providing index-based subsetting capabilities. Parameters ---------- service : str, optional The name of the service to use for access to the dataset, either 'CdmRemote' or 'OPENDAP'. Defaults to 'CdmRemote' if that service is available, otherwise 'OPENDAP'. use_xarray : bool, optional If True, open the dataset with xarray and return an `xarray.Dataset` instead. Returns ------- Dataset Object for netCDF4-like access to the dataset
[ "Access", "the", "remote", "dataset", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/catalog.py#L596-L620
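Opening a dataset for index-based access, with the same placeholder catalog URL as above:

from siphon.catalog import TDSCatalog

cat = TDSCatalog('https://thredds.example.com/thredds/catalog/some/path/catalog.xml')
nc = cat.datasets[0].remote_access()   # netCDF4-like object via CdmRemote/OPENDAP
print(list(nc.variables))              # inspect available variables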
Unidata/siphon
siphon/catalog.py
Dataset.subset
def subset(self, service=None): """Subset the dataset. Open the remote dataset and get a client for talking to ``service``. Parameters ---------- service : str, optional The name of the service for subsetting the dataset. Defaults to 'NetcdfSubset' or 'NetcdfServer', in that order, depending on the services listed in the catalog. Returns ------- a client for communicating using ``service`` """ if service is None: for serviceName in self.ncssServiceNames: if serviceName in self.access_urls: service = serviceName break else: raise RuntimeError('Subset access is not available for this dataset.') elif service not in self.ncssServiceNames: raise ValueError(service + ' is not a valid service for subset. Options are: ' + ', '.join(self.ncssServiceNames)) return self.access_with_service(service)
python
def subset(self, service=None): """Subset the dataset. Open the remote dataset and get a client for talking to ``service``. Parameters ---------- service : str, optional The name of the service for subsetting the dataset. Defaults to 'NetcdfSubset' or 'NetcdfServer', in that order, depending on the services listed in the catalog. Returns ------- a client for communicating using ``service`` """ if service is None: for serviceName in self.ncssServiceNames: if serviceName in self.access_urls: service = serviceName break else: raise RuntimeError('Subset access is not available for this dataset.') elif service not in self.ncssServiceNames: raise ValueError(service + ' is not a valid service for subset. Options are: ' + ', '.join(self.ncssServiceNames)) return self.access_with_service(service)
[ "def", "subset", "(", "self", ",", "service", "=", "None", ")", ":", "if", "service", "is", "None", ":", "for", "serviceName", "in", "self", ".", "ncssServiceNames", ":", "if", "serviceName", "in", "self", ".", "access_urls", ":", "service", "=", "serviceName", "break", "else", ":", "raise", "RuntimeError", "(", "'Subset access is not available for this dataset.'", ")", "elif", "service", "not", "in", "self", ".", "ncssServiceNames", ":", "raise", "ValueError", "(", "service", "+", "' is not a valid service for subset. Options are: '", "+", "', '", ".", "join", "(", "self", ".", "ncssServiceNames", ")", ")", "return", "self", ".", "access_with_service", "(", "service", ")" ]
Subset the dataset. Open the remote dataset and get a client for talking to ``service``. Parameters ---------- service : str, optional The name of the service for subsetting the dataset. Defaults to 'NetcdfSubset' or 'NetcdfServer', in that order, depending on the services listed in the catalog. Returns ------- a client for communicating using ``service``
[ "Subset", "the", "dataset", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/catalog.py#L622-L650
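A hedged NCSS sketch; the catalog URL, coordinates, and variable name are placeholders:

from datetime import datetime
from siphon.catalog import TDSCatalog

cat = TDSCatalog('https://thredds.example.com/thredds/catalog/grids/catalog.xml')
ncss = cat.datasets[0].subset()        # NCSS client from the record above

query = ncss.query()
query.lonlat_point(-105.0, 40.0).time(datetime.utcnow())
query.variables('Temperature_isobaric')   # placeholder variable name
data = ncss.get_data(query)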
Unidata/siphon
siphon/catalog.py
Dataset.access_with_service
def access_with_service(self, service, use_xarray=None): """Access the dataset using a particular service. Return a Python object capable of communicating with the server using the particular service. For instance, for 'HTTPServer' this is a file-like object capable of HTTP communication; for OPENDAP this is a netCDF4 dataset. Parameters ---------- service : str The name of the service for accessing the dataset use_xarray : bool, optional If True, open the dataset with xarray where the service supports it. Returns ------- An instance appropriate for communicating using ``service``. """ service = CaseInsensitiveStr(service) if service == 'CdmRemote': if use_xarray: from .cdmr.xarray_support import CDMRemoteStore try: import xarray as xr provider = lambda url: xr.open_dataset(CDMRemoteStore(url)) # noqa: E731 except ImportError: raise ImportError('CdmRemote access needs xarray to be installed.') else: from .cdmr import Dataset as CDMRDataset provider = CDMRDataset elif service == 'OPENDAP': if use_xarray: try: import xarray as xr provider = xr.open_dataset except ImportError: raise ImportError('xarray needs to be installed if `use_xarray` is True.') else: try: from netCDF4 import Dataset as NC4Dataset provider = NC4Dataset except ImportError: raise ImportError('OPENDAP access needs netCDF4-python to be installed.') elif service in self.ncssServiceNames: from .ncss import NCSS provider = NCSS elif service == 'HTTPServer': provider = session_manager.urlopen else: raise ValueError(service + ' is not an access method supported by Siphon') try: return provider(self.access_urls[service]) except KeyError: raise ValueError(service + ' is not available for this dataset')
python
def access_with_service(self, service, use_xarray=None):
        """Access the dataset using a particular service.

        Return a Python object capable of communicating with the server using the particular
        service. For instance, for 'HTTPServer' this is a file-like object capable of
        HTTP communication; for OPENDAP this is a netCDF4 dataset.

        Parameters
        ----------
        service : str
            The name of the service for accessing the dataset
        use_xarray : bool, optional
            If True, open the dataset with ``xarray`` where the service supports it
            ('CdmRemote' and 'OPENDAP').

        Returns
        -------
        An instance appropriate for communicating using ``service``.

        """
        service = CaseInsensitiveStr(service)
        if service == 'CdmRemote':
            if use_xarray:
                from .cdmr.xarray_support import CDMRemoteStore
                try:
                    import xarray as xr
                    provider = lambda url: xr.open_dataset(CDMRemoteStore(url))  # noqa: E731
                except ImportError:
                    raise ImportError('CdmRemote access needs xarray to be installed.')
            else:
                from .cdmr import Dataset as CDMRDataset
                provider = CDMRDataset
        elif service == 'OPENDAP':
            if use_xarray:
                try:
                    import xarray as xr
                    provider = xr.open_dataset
                except ImportError:
                    raise ImportError('xarray needs to be installed if `use_xarray` is True.')
            else:
                try:
                    from netCDF4 import Dataset as NC4Dataset
                    provider = NC4Dataset
                except ImportError:
                    raise ImportError('OPENDAP access needs netCDF4-python to be installed.')
        elif service in self.ncssServiceNames:
            from .ncss import NCSS
            provider = NCSS
        elif service == 'HTTPServer':
            provider = session_manager.urlopen
        else:
            raise ValueError(service + ' is not an access method supported by Siphon')

        try:
            return provider(self.access_urls[service])
        except KeyError:
            raise ValueError(service + ' is not available for this dataset')
[ "def", "access_with_service", "(", "self", ",", "service", ",", "use_xarray", "=", "None", ")", ":", "service", "=", "CaseInsensitiveStr", "(", "service", ")", "if", "service", "==", "'CdmRemote'", ":", "if", "use_xarray", ":", "from", ".", "cdmr", ".", "xarray_support", "import", "CDMRemoteStore", "try", ":", "import", "xarray", "as", "xr", "provider", "=", "lambda", "url", ":", "xr", ".", "open_dataset", "(", "CDMRemoteStore", "(", "url", ")", ")", "# noqa: E731", "except", "ImportError", ":", "raise", "ImportError", "(", "'CdmRemote access needs xarray to be installed.'", ")", "else", ":", "from", ".", "cdmr", "import", "Dataset", "as", "CDMRDataset", "provider", "=", "CDMRDataset", "elif", "service", "==", "'OPENDAP'", ":", "if", "use_xarray", ":", "try", ":", "import", "xarray", "as", "xr", "provider", "=", "xr", ".", "open_dataset", "except", "ImportError", ":", "raise", "ImportError", "(", "'xarray to be installed if `use_xarray` is True.'", ")", "else", ":", "try", ":", "from", "netCDF4", "import", "Dataset", "as", "NC4Dataset", "provider", "=", "NC4Dataset", "except", "ImportError", ":", "raise", "ImportError", "(", "'OPENDAP access needs netCDF4-python to be installed.'", ")", "elif", "service", "in", "self", ".", "ncssServiceNames", ":", "from", ".", "ncss", "import", "NCSS", "provider", "=", "NCSS", "elif", "service", "==", "'HTTPServer'", ":", "provider", "=", "session_manager", ".", "urlopen", "else", ":", "raise", "ValueError", "(", "service", "+", "' is not an access method supported by Siphon'", ")", "try", ":", "return", "provider", "(", "self", ".", "access_urls", "[", "service", "]", ")", "except", "KeyError", ":", "raise", "ValueError", "(", "service", "+", "' is not available for this dataset'", ")" ]
Access the dataset using a particular service.

Return a Python object capable of communicating with the server using the particular
service. For instance, for 'HTTPServer' this is a file-like object capable of
HTTP communication; for OPENDAP this is a netCDF4 dataset.

Parameters
----------
service : str
    The name of the service for accessing the dataset
use_xarray : bool, optional
    If True, open the dataset with ``xarray`` where the service supports it
    ('CdmRemote' and 'OPENDAP').

Returns
-------
An instance appropriate for communicating using ``service``.
[ "Access", "the", "dataset", "using", "a", "particular", "service", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/catalog.py#L652-L705
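A minimal sketch of access_with_service(), again with a placeholder catalog URL; per the ImportError branches above, 'OPENDAP' access needs netCDF4-python (or xarray when use_xarray=True).

from siphon.catalog import TDSCatalog

cat = TDSCatalog('https://thredds.example.edu/thredds/catalog.xml')  # placeholder URL
ds = list(cat.datasets.values())[0]

# Returns a netCDF4.Dataset; use_xarray=True would return an xarray.Dataset instead.
nc = ds.access_with_service('OPENDAP')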
Unidata/siphon
siphon/_tools.py
get_wind_components
def get_wind_components(speed, wdir):
    r"""Calculate the U, V wind vector components from the speed and direction.

    Parameters
    ----------
    speed : array_like
        The wind speed (magnitude)
    wdir : array_like
        The wind direction in radians, specified as the direction from which the
        wind is blowing, with 0 being North.

    Returns
    -------
    u, v : tuple of array_like
        The wind components in the X (East-West) and Y (North-South)
        directions, respectively.

    """
    u = -speed * np.sin(wdir)
    v = -speed * np.cos(wdir)
    return u, v
python
def get_wind_components(speed, wdir):
    r"""Calculate the U, V wind vector components from the speed and direction.

    Parameters
    ----------
    speed : array_like
        The wind speed (magnitude)
    wdir : array_like
        The wind direction in radians, specified as the direction from which the
        wind is blowing, with 0 being North.

    Returns
    -------
    u, v : tuple of array_like
        The wind components in the X (East-West) and Y (North-South)
        directions, respectively.

    """
    u = -speed * np.sin(wdir)
    v = -speed * np.cos(wdir)
    return u, v
[ "def", "get_wind_components", "(", "speed", ",", "wdir", ")", ":", "u", "=", "-", "speed", "*", "np", ".", "sin", "(", "wdir", ")", "v", "=", "-", "speed", "*", "np", ".", "cos", "(", "wdir", ")", "return", "u", ",", "v" ]
r"""Calculate the U, V wind vector components from the speed and direction. Parameters ---------- speed : array_like The wind speed (magnitude) wdir : array_like The wind direction, specified as the direction from which the wind is blowing, with 0 being North. Returns ------- u, v : tuple of array_like The wind components in the X (East-West) and Y (North-South) directions, respectively.
[ "r", "Calculate", "the", "U", "V", "wind", "vector", "components", "from", "the", "speed", "and", "direction", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/_tools.py#L9-L29
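A worked example of the sign convention: a wind blowing from the north (wdir = 0, in radians) yields u = 0 and v = -speed, i.e. air moving toward the south. Sketch assuming get_wind_components is importable from siphon._tools as in this record.

import numpy as np

from siphon._tools import get_wind_components

# 10 m/s wind from the north: u should be 0, v should be -10.
u, v = get_wind_components(np.array([10.0]), np.array([0.0]))
print(u, v)  # [-0.] [-10.]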
Unidata/siphon
siphon/cdmr/cdmremotefeature.py
CDMRemoteFeature._get_metadata
def _get_metadata(self): """Get header information and store as metadata for the endpoint.""" self.metadata = self.fetch_header() self.variables = {g.name for g in self.metadata.grids}
python
def _get_metadata(self): """Get header information and store as metadata for the endpoint.""" self.metadata = self.fetch_header() self.variables = {g.name for g in self.metadata.grids}
[ "def", "_get_metadata", "(", "self", ")", ":", "self", ".", "metadata", "=", "self", ".", "fetch_header", "(", ")", "self", ".", "variables", "=", "{", "g", ".", "name", "for", "g", "in", "self", ".", "metadata", ".", "grids", "}" ]
Get header information and store as metadata for the endpoint.
[ "Get", "header", "information", "and", "store", "as", "metadata", "for", "the", "endpoint", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/cdmremotefeature.py#L20-L23
Unidata/siphon
siphon/cdmr/cdmremotefeature.py
CDMRemoteFeature.fetch_header
def fetch_header(self): """Make a header request to the endpoint.""" query = self.query().add_query_parameter(req='header') return self._parse_messages(self.get_query(query).content)[0]
python
def fetch_header(self): """Make a header request to the endpoint.""" query = self.query().add_query_parameter(req='header') return self._parse_messages(self.get_query(query).content)[0]
[ "def", "fetch_header", "(", "self", ")", ":", "query", "=", "self", ".", "query", "(", ")", ".", "add_query_parameter", "(", "req", "=", "'header'", ")", "return", "self", ".", "_parse_messages", "(", "self", ".", "get_query", "(", "query", ")", ".", "content", ")", "[", "0", "]" ]
Make a header request to the endpoint.
[ "Make", "a", "header", "request", "to", "the", "endpoint", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/cdmremotefeature.py#L25-L28
Unidata/siphon
siphon/cdmr/cdmremotefeature.py
CDMRemoteFeature.fetch_feature_type
def fetch_feature_type(self): """Request the featureType from the endpoint.""" query = self.query().add_query_parameter(req='featureType') return self.get_query(query).content
python
def fetch_feature_type(self): """Request the featureType from the endpoint.""" query = self.query().add_query_parameter(req='featureType') return self.get_query(query).content
[ "def", "fetch_feature_type", "(", "self", ")", ":", "query", "=", "self", ".", "query", "(", ")", ".", "add_query_parameter", "(", "req", "=", "'featureType'", ")", "return", "self", ".", "get_query", "(", "query", ")", ".", "content" ]
Request the featureType from the endpoint.
[ "Request", "the", "featureType", "from", "the", "endpoint", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/cdmremotefeature.py#L30-L33
Unidata/siphon
siphon/cdmr/cdmremotefeature.py
CDMRemoteFeature.fetch_coords
def fetch_coords(self, query): """Pull down coordinate data from the endpoint.""" q = query.add_query_parameter(req='coord') return self._parse_messages(self.get_query(q).content)
python
def fetch_coords(self, query): """Pull down coordinate data from the endpoint.""" q = query.add_query_parameter(req='coord') return self._parse_messages(self.get_query(q).content)
[ "def", "fetch_coords", "(", "self", ",", "query", ")", ":", "q", "=", "query", ".", "add_query_parameter", "(", "req", "=", "'coord'", ")", "return", "self", ".", "_parse_messages", "(", "self", ".", "get_query", "(", "q", ")", ".", "content", ")" ]
Pull down coordinate data from the endpoint.
[ "Pull", "down", "coordinate", "data", "from", "the", "endpoint", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/cdmremotefeature.py#L35-L38
Unidata/siphon
siphon/simplewebservice/igra2.py
IGRAUpperAir.request_data
def request_data(cls, time, site_id, derived=False):
    """Retrieve IGRA version 2 data for one station.

    Parameters
    ----------
    time : datetime
        The date and time of the desired observation. If a list of two times is
        given, dataframes for all dates within the two dates will be returned.
    site_id : str
        11-character IGRA2 station identifier.
    derived : bool, optional
        If True, request the derived-parameter files rather than the standard
        sounding data.

    Returns
    -------
    :class:`pandas.DataFrame` containing the body data, and
    :class:`pandas.DataFrame` containing the header data.

    """
    igra2 = cls()

    # Set parameters for data query
    if derived:
        igra2.ftpsite = igra2.ftpsite + 'derived/derived-por/'
        igra2.suffix = igra2.suffix + '-drvd.txt'
    else:
        igra2.ftpsite = igra2.ftpsite + 'data/data-por/'
        igra2.suffix = igra2.suffix + '-data.txt'

    if type(time) == datetime.datetime:
        igra2.begin_date = time
        igra2.end_date = time
    else:
        igra2.begin_date, igra2.end_date = time

    igra2.site_id = site_id

    df, headers = igra2._get_data()
    return df, headers
python
def request_data(cls, time, site_id, derived=False):
    """Retrieve IGRA version 2 data for one station.

    Parameters
    ----------
    time : datetime
        The date and time of the desired observation. If a list of two times is
        given, dataframes for all dates within the two dates will be returned.
    site_id : str
        11-character IGRA2 station identifier.
    derived : bool, optional
        If True, request the derived-parameter files rather than the standard
        sounding data.

    Returns
    -------
    :class:`pandas.DataFrame` containing the body data, and
    :class:`pandas.DataFrame` containing the header data.

    """
    igra2 = cls()

    # Set parameters for data query
    if derived:
        igra2.ftpsite = igra2.ftpsite + 'derived/derived-por/'
        igra2.suffix = igra2.suffix + '-drvd.txt'
    else:
        igra2.ftpsite = igra2.ftpsite + 'data/data-por/'
        igra2.suffix = igra2.suffix + '-data.txt'

    if type(time) == datetime.datetime:
        igra2.begin_date = time
        igra2.end_date = time
    else:
        igra2.begin_date, igra2.end_date = time

    igra2.site_id = site_id

    df, headers = igra2._get_data()
    return df, headers
[ "def", "request_data", "(", "cls", ",", "time", ",", "site_id", ",", "derived", "=", "False", ")", ":", "igra2", "=", "cls", "(", ")", "# Set parameters for data query", "if", "derived", ":", "igra2", ".", "ftpsite", "=", "igra2", ".", "ftpsite", "+", "'derived/derived-por/'", "igra2", ".", "suffix", "=", "igra2", ".", "suffix", "+", "'-drvd.txt'", "else", ":", "igra2", ".", "ftpsite", "=", "igra2", ".", "ftpsite", "+", "'data/data-por/'", "igra2", ".", "suffix", "=", "igra2", ".", "suffix", "+", "'-data.txt'", "if", "type", "(", "time", ")", "==", "datetime", ".", "datetime", ":", "igra2", ".", "begin_date", "=", "time", "igra2", ".", "end_date", "=", "time", "else", ":", "igra2", ".", "begin_date", ",", "igra2", ".", "end_date", "=", "time", "igra2", ".", "site_id", "=", "site_id", "df", ",", "headers", "=", "igra2", ".", "_get_data", "(", ")", "return", "df", ",", "headers" ]
Retrieve IGRA version 2 data for one station.

Parameters
----------
time : datetime
    The date and time of the desired observation. If a list of two times is
    given, dataframes for all dates within the two dates will be returned.
site_id : str
    11-character IGRA2 station identifier.
derived : bool, optional
    If True, request the derived-parameter files rather than the standard
    sounding data.

Returns
-------
:class:`pandas.DataFrame` containing the body data, and
:class:`pandas.DataFrame` containing the header data.
[ "Retreive", "IGRA", "version", "2", "data", "for", "one", "station", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/simplewebservice/igra2.py#L35-L72
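A usage sketch for request_data(); the station identifier and date below are illustrative values, not taken from this record.

import datetime

from siphon.simplewebservice.igra2 import IGRAUpperAir

# Body and header dataframes for a single sounding (begin and end dates equal).
df, header = IGRAUpperAir.request_data(datetime.datetime(2014, 9, 10, 0),
                                       'USM00070026')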
Unidata/siphon
siphon/simplewebservice/igra2.py
IGRAUpperAir._get_data
def _get_data(self):
    """Process the IGRA2 text file for observations at site_id matching time.

    Returns
    -------
    :class:`pandas.DataFrame` containing the body data.
    :class:`pandas.DataFrame` containing the header data.

    """
    # Fetch the raw text for the requested date range, then parse the body and
    # header sections into dataframes with the fixed-width-format parameters.
    body, header, dates_long, dates = self._get_data_raw()

    params = self._get_fwf_params()

    df_body = pd.read_fwf(StringIO(body), **params['body'])
    df_header = pd.read_fwf(StringIO(header), **params['header'])
    df_body['date'] = dates_long

    df_body = self._clean_body_df(df_body)
    df_header = self._clean_header_df(df_header)
    df_header['date'] = dates

    return df_body, df_header
python
def _get_data(self):
    """Process the IGRA2 text file for observations at site_id matching time.

    Returns
    -------
    :class:`pandas.DataFrame` containing the body data.
    :class:`pandas.DataFrame` containing the header data.

    """
    # Fetch the raw text for the requested date range, then parse the body and
    # header sections into dataframes with the fixed-width-format parameters.
    body, header, dates_long, dates = self._get_data_raw()

    params = self._get_fwf_params()

    df_body = pd.read_fwf(StringIO(body), **params['body'])
    df_header = pd.read_fwf(StringIO(header), **params['header'])
    df_body['date'] = dates_long

    df_body = self._clean_body_df(df_body)
    df_header = self._clean_header_df(df_header)
    df_header['date'] = dates

    return df_body, df_header
[ "def", "_get_data", "(", "self", ")", ":", "# Split the list of times into begin and end dates. If only", "# one date is supplied, set both begin and end dates equal to that date.", "body", ",", "header", ",", "dates_long", ",", "dates", "=", "self", ".", "_get_data_raw", "(", ")", "params", "=", "self", ".", "_get_fwf_params", "(", ")", "df_body", "=", "pd", ".", "read_fwf", "(", "StringIO", "(", "body", ")", ",", "*", "*", "params", "[", "'body'", "]", ")", "df_header", "=", "pd", ".", "read_fwf", "(", "StringIO", "(", "header", ")", ",", "*", "*", "params", "[", "'header'", "]", ")", "df_body", "[", "'date'", "]", "=", "dates_long", "df_body", "=", "self", ".", "_clean_body_df", "(", "df_body", ")", "df_header", "=", "self", ".", "_clean_header_df", "(", "df_header", ")", "df_header", "[", "'date'", "]", "=", "dates", "return", "df_body", ",", "df_header" ]
Process the IGRA2 text file for observations at site_id matching time.

Returns
-------
:class:`pandas.DataFrame` containing the body data.
:class:`pandas.DataFrame` containing the header data.
[ "Process", "the", "IGRA2", "text", "file", "for", "observations", "at", "site_id", "matching", "time", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/simplewebservice/igra2.py#L74-L97
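The heavy lifting here is pandas.read_fwf, driven by the colspecs and converters dict built in _get_fwf_params (a later record). A standalone sketch of the same fixed-width pattern, with made-up column boundaries rather than the real IGRA2 layout.

from io import StringIO

import pandas as pd

text = ' 10132   250\n  8500   104\n'
# colspecs gives (start, end) character offsets for each column; a converters
# dict could rescale each field, as the IGRA2 parser does.
df = pd.read_fwf(StringIO(text), colspecs=[(0, 6), (7, 12)],
                 names=['pressure', 'temperature'])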
Unidata/siphon
siphon/simplewebservice/igra2.py
IGRAUpperAir._get_data_raw
def _get_data_raw(self):
    """Download observations matching the time range.

    Returns a tuple with a string for the body, a string for the headers,
    and a list of dates.

    """
    # Imports need to be here so we can monkeypatch urlopen for testing and avoid
    # downloading live data for testing
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen

    with closing(urlopen(self.ftpsite + self.site_id + self.suffix + '.zip')) as url:
        f = ZipFile(BytesIO(url.read()), 'r').open(self.site_id + self.suffix)

    lines = [line.decode('utf-8') for line in f.readlines()]

    body, header, dates_long, dates = self._select_date_range(lines)
    return body, header, dates_long, dates
python
def _get_data_raw(self):
    """Download observations matching the time range.

    Returns a tuple with a string for the body, a string for the headers,
    and a list of dates.

    """
    # Imports need to be here so we can monkeypatch urlopen for testing and avoid
    # downloading live data for testing
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen

    with closing(urlopen(self.ftpsite + self.site_id + self.suffix + '.zip')) as url:
        f = ZipFile(BytesIO(url.read()), 'r').open(self.site_id + self.suffix)

    lines = [line.decode('utf-8') for line in f.readlines()]

    body, header, dates_long, dates = self._select_date_range(lines)
    return body, header, dates_long, dates
[ "def", "_get_data_raw", "(", "self", ")", ":", "# Import need to be here so we can monkeypatch urlopen for testing and avoid", "# downloading live data for testing", "try", ":", "from", "urllib", ".", "request", "import", "urlopen", "except", "ImportError", ":", "from", "urllib2", "import", "urlopen", "with", "closing", "(", "urlopen", "(", "self", ".", "ftpsite", "+", "self", ".", "site_id", "+", "self", ".", "suffix", "+", "'.zip'", ")", ")", "as", "url", ":", "f", "=", "ZipFile", "(", "BytesIO", "(", "url", ".", "read", "(", ")", ")", ",", "'r'", ")", ".", "open", "(", "self", ".", "site_id", "+", "self", ".", "suffix", ")", "lines", "=", "[", "line", ".", "decode", "(", "'utf-8'", ")", "for", "line", "in", "f", ".", "readlines", "(", ")", "]", "body", ",", "header", ",", "dates_long", ",", "dates", "=", "self", ".", "_select_date_range", "(", "lines", ")", "return", "body", ",", "header", ",", "dates_long", ",", "dates" ]
Download observations matching the time range.

Returns a tuple with a string for the body, a string for the headers, and a list of dates.
[ "Download", "observations", "matching", "the", "time", "range", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/simplewebservice/igra2.py#L99-L119
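The download pattern above (fetch a zip over HTTP, unzip in memory, decode the lines) in isolation, with a placeholder URL and archive member name.

from contextlib import closing
from io import BytesIO
from urllib.request import urlopen
from zipfile import ZipFile

url = 'https://example.org/pub/data/USM00070026-data.txt.zip'  # placeholder
with closing(urlopen(url)) as resp:
    # BytesIO holds the whole archive, so the member can be read after resp closes.
    f = ZipFile(BytesIO(resp.read()), 'r').open('USM00070026-data.txt')
lines = [line.decode('utf-8') for line in f.readlines()]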
Unidata/siphon
siphon/simplewebservice/igra2.py
IGRAUpperAir._select_date_range
def _select_date_range(self, lines):
    """Identify lines containing headers within the range begin_date to end_date.

    Parameters
    ----------
    lines : list
        List of lines from the IGRA2 data file.

    """
    headers = []
    num_lev = []
    dates = []

    # Get indices of headers, and make a list of dates and num_lev
    for idx, line in enumerate(lines):
        if line[0] == '#':
            year, month, day, hour = map(int, line[13:26].split())

            # All soundings have YMD, most have hour
            try:
                date = datetime.datetime(year, month, day, hour)
            except ValueError:
                date = datetime.datetime(year, month, day)

            # Check date
            if self.begin_date <= date <= self.end_date:
                headers.append(idx)
                num_lev.append(int(line[32:36]))
                dates.append(date)
            if date > self.end_date:
                break

    if len(dates) == 0:
        # Raise if no dates matched.
        # Could improve this later by showing the date range for the station.
        raise ValueError('No dates match selection.')

    # Compress body of data into a string
    begin_idx = min(headers)
    end_idx = max(headers) + num_lev[-1]

    # Make a boolean vector that selects only list indices within the time range
    selector = np.zeros(len(lines), dtype=bool)
    selector[begin_idx:end_idx + 1] = True
    selector[headers] = False
    body = ''.join([line for line in itertools.compress(lines, selector)])

    selector[begin_idx:end_idx + 1] = ~selector[begin_idx:end_idx + 1]
    header = ''.join([line for line in itertools.compress(lines, selector)])

    # Expand date vector to match the length of the body dataframe.
    dates_long = np.repeat(dates, num_lev)

    return body, header, dates_long, dates
python
def _select_date_range(self, lines):
    """Identify lines containing headers within the range begin_date to end_date.

    Parameters
    ----------
    lines : list
        List of lines from the IGRA2 data file.

    """
    headers = []
    num_lev = []
    dates = []

    # Get indices of headers, and make a list of dates and num_lev
    for idx, line in enumerate(lines):
        if line[0] == '#':
            year, month, day, hour = map(int, line[13:26].split())

            # All soundings have YMD, most have hour
            try:
                date = datetime.datetime(year, month, day, hour)
            except ValueError:
                date = datetime.datetime(year, month, day)

            # Check date
            if self.begin_date <= date <= self.end_date:
                headers.append(idx)
                num_lev.append(int(line[32:36]))
                dates.append(date)
            if date > self.end_date:
                break

    if len(dates) == 0:
        # Raise if no dates matched.
        # Could improve this later by showing the date range for the station.
        raise ValueError('No dates match selection.')

    # Compress body of data into a string
    begin_idx = min(headers)
    end_idx = max(headers) + num_lev[-1]

    # Make a boolean vector that selects only list indices within the time range
    selector = np.zeros(len(lines), dtype=bool)
    selector[begin_idx:end_idx + 1] = True
    selector[headers] = False
    body = ''.join([line for line in itertools.compress(lines, selector)])

    selector[begin_idx:end_idx + 1] = ~selector[begin_idx:end_idx + 1]
    header = ''.join([line for line in itertools.compress(lines, selector)])

    # Expand date vector to match the length of the body dataframe.
    dates_long = np.repeat(dates, num_lev)

    return body, header, dates_long, dates
[ "def", "_select_date_range", "(", "self", ",", "lines", ")", ":", "headers", "=", "[", "]", "num_lev", "=", "[", "]", "dates", "=", "[", "]", "# Get indices of headers, and make a list of dates and num_lev", "for", "idx", ",", "line", "in", "enumerate", "(", "lines", ")", ":", "if", "line", "[", "0", "]", "==", "'#'", ":", "year", ",", "month", ",", "day", ",", "hour", "=", "map", "(", "int", ",", "line", "[", "13", ":", "26", "]", ".", "split", "(", ")", ")", "# All soundings have YMD, most have hour", "try", ":", "date", "=", "datetime", ".", "datetime", "(", "year", ",", "month", ",", "day", ",", "hour", ")", "except", "ValueError", ":", "date", "=", "datetime", ".", "datetime", "(", "year", ",", "month", ",", "day", ")", "# Check date", "if", "self", ".", "begin_date", "<=", "date", "<=", "self", ".", "end_date", ":", "headers", ".", "append", "(", "idx", ")", "num_lev", ".", "append", "(", "int", "(", "line", "[", "32", ":", "36", "]", ")", ")", "dates", ".", "append", "(", "date", ")", "if", "date", ">", "self", ".", "end_date", ":", "break", "if", "len", "(", "dates", ")", "==", "0", ":", "# Break if no matched dates.", "# Could improve this later by showing the date range for the station.", "raise", "ValueError", "(", "'No dates match selection.'", ")", "# Compress body of data into a string", "begin_idx", "=", "min", "(", "headers", ")", "end_idx", "=", "max", "(", "headers", ")", "+", "num_lev", "[", "-", "1", "]", "# Make a boolean vector that selects only list indices within the time range", "selector", "=", "np", ".", "zeros", "(", "len", "(", "lines", ")", ",", "dtype", "=", "bool", ")", "selector", "[", "begin_idx", ":", "end_idx", "+", "1", "]", "=", "True", "selector", "[", "headers", "]", "=", "False", "body", "=", "''", ".", "join", "(", "[", "line", "for", "line", "in", "itertools", ".", "compress", "(", "lines", ",", "selector", ")", "]", ")", "selector", "[", "begin_idx", ":", "end_idx", "+", "1", "]", "=", "~", "selector", "[", "begin_idx", ":", "end_idx", "+", "1", "]", "header", "=", "''", ".", "join", "(", "[", "line", "for", "line", "in", "itertools", ".", "compress", "(", "lines", ",", "selector", ")", "]", ")", "# expand date vector to match length of the body dataframe.", "dates_long", "=", "np", ".", "repeat", "(", "dates", ",", "num_lev", ")", "return", "body", ",", "header", ",", "dates_long", ",", "dates" ]
Identify lines containing headers within the range begin_date to end_date.

Parameters
----------
lines : list
    List of lines from the IGRA2 data file.
[ "Identify", "lines", "containing", "headers", "within", "the", "range", "begin_date", "to", "end_date", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/simplewebservice/igra2.py#L121-L174
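The header/body split above rests on a shared boolean mask fed to itertools.compress: mark the whole date range True, zero out the header rows to extract the body, then invert the range so only the header rows survive. A toy illustration of that trick.

import itertools

import numpy as np

lines = ['#header A\n', 'level 1\n', 'level 2\n', '#header B\n', 'level 3\n']
selector = np.zeros(len(lines), dtype=bool)
selector[0:5] = True
selector[[0, 3]] = False                    # drop the header rows -> body only
body = ''.join(itertools.compress(lines, selector))
selector[0:5] = ~selector[0:5]              # invert the range -> headers only
header = ''.join(itertools.compress(lines, selector))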
Unidata/siphon
siphon/simplewebservice/igra2.py
IGRAUpperAir._get_fwf_params
def _get_fwf_params(self): """Produce a dictionary with names, colspecs, and dtype for IGRA2 data. Returns a dict with entries 'body' and 'header'. """ def _cdec(power=1): """Make a function to convert string 'value*10^power' to float.""" def _cdec_power(val): if val in ['-9999', '-8888', '-99999']: return np.nan else: return float(val) / 10**power return _cdec_power def _cflag(val): """Replace alphabetic flags A and B with numeric.""" if val == 'A': return 1 elif val == 'B': return 2 else: return 0 def _ctime(strformat='MMMSS'): """Return a function converting a string from MMMSS or HHMM to seconds.""" def _ctime_strformat(val): time = val.strip().zfill(5) if int(time) < 0: return np.nan elif int(time) == 9999: return np.nan else: if strformat == 'MMMSS': minutes = int(time[0:3]) seconds = int(time[3:5]) time_seconds = minutes * 60 + seconds elif strformat == 'HHMM': hours = int(time[0:2]) minutes = int(time[2:4]) time_seconds = hours * 3600 + minutes * 60 else: sys.exit('Unrecognized time format') return time_seconds return _ctime_strformat def _clatlon(x): n = len(x) deg = x[0:n - 4] dec = x[n - 4:] return float(deg + '.' + dec) if self.suffix == '-drvd.txt': names_body = ['pressure', 'reported_height', 'calculated_height', 'temperature', 'temperature_gradient', 'potential_temperature', 'potential_temperature_gradient', 'virtual_temperature', 'virtual_potential_temperature', 'vapor_pressure', 'saturation_vapor_pressure', 'reported_relative_humidity', 'calculated_relative_humidity', 'u_wind', 'u_wind_gradient', 'v_wind', 'v_wind_gradient', 'refractive_index'] colspecs_body = [(0, 7), (8, 15), (16, 23), (24, 31), (32, 39), (40, 47), (48, 55), (56, 63), (64, 71), (72, 79), (80, 87), (88, 95), (96, 103), (104, 111), (112, 119), (120, 127), (128, 135), (137, 143), (144, 151)] conv_body = {'pressure': _cdec(power=2), 'reported_height': int, 'calculated_height': int, 'temperature': _cdec(), 'temperature_gradient': _cdec(), 'potential_temperature': _cdec(), 'potential_temperature_gradient': _cdec(), 'virtual_temperature': _cdec(), 'virtual_potential_temperature': _cdec(), 'vapor_pressure': _cdec(power=3), 'saturation_vapor_pressure': _cdec(power=3), 'reported_relative_humidity': _cdec(), 'calculated_relative_humidity': _cdec(), 'u_wind': _cdec(), 'u_wind_gradient': _cdec(), 'v_wind': _cdec(), 'v_wind_gradient': _cdec(), 'refractive_index': int} names_header = ['site_id', 'year', 'month', 'day', 'hour', 'release_time', 'number_levels', 'precipitable_water', 'inv_pressure', 'inv_height', 'inv_strength', 'mixed_layer_pressure', 'mixed_layer_height', 'freezing_point_pressure', 'freezing_point_height', 'lcl_pressure', 'lcl_height', 'lfc_pressure', 'lfc_height', 'lnb_pressure', 'lnb_height', 'lifted_index', 'showalter_index', 'k_index', 'total_totals_index', 'cape', 'convective_inhibition'] colspecs_header = [(1, 12), (13, 17), (18, 20), (21, 23), (24, 26), (27, 31), (31, 36), (37, 43), (43, 48), (49, 55), (55, 61), (61, 67), (67, 73), (73, 79), (79, 85), (85, 91), (91, 97), (97, 103), (103, 109), (109, 115), (115, 121), (121, 127), (127, 133), (133, 139), (139, 145), (145, 151), (151, 157)] conv_header = {'site_id': str, 'year': int, 'month': int, 'day': int, 'hour': int, 'release_time': _ctime(strformat='HHMM'), 'number_levels': int, 'precipitable_water': _cdec(power=2), 'inv_pressure': _cdec(power=2), 'inv_height': int, 'inv_strength': _cdec(), 'mixed_layer_pressure': _cdec(power=2), 'mixed_layer_height': int, 'freezing_point_pressure': _cdec(power=2), 'freezing_point_height': int, 'lcl_pressure': 
_cdec(power=2), 'lcl_height': int, 'lfc_pressure': _cdec(power=2), 'lfc_height': int, 'lnb_pressure': _cdec(power=2), 'lnb_height': int, 'lifted_index': int, 'showalter_index': int, 'k_index': int, 'total_totals_index': int, 'cape': int, 'convective_inhibition': int} na_vals = ['-99999'] else: names_body = ['lvltyp1', 'lvltyp2', 'etime', 'pressure', 'pflag', 'height', 'zflag', 'temperature', 'tflag', 'relative_humidity', 'dewpoint_depression', 'direction', 'speed'] colspecs_body = [(0, 1), (1, 2), (3, 8), (9, 15), (15, 16), (16, 21), (21, 22), (22, 27), (27, 28), (28, 33), (34, 39), (40, 45), (46, 51)] conv_body = {'lvltyp1': int, 'lvltyp2': int, 'etime': _ctime(strformat='MMMSS'), 'pressure': _cdec(power=2), 'pflag': _cflag, 'height': int, 'zflag': _cflag, 'temperature': _cdec(), 'tflag': _cflag, 'relative_humidity': _cdec(), 'dewpoint_depression': _cdec(), 'direction': int, 'speed': _cdec()} names_header = ['site_id', 'year', 'month', 'day', 'hour', 'release_time', 'number_levels', 'pressure_source_code', 'non_pressure_source_code', 'latitude', 'longitude'] colspecs_header = [(1, 12), (13, 17), (18, 20), (21, 23), (24, 26), (27, 31), (32, 36), (37, 45), (46, 54), (55, 62), (63, 71)] na_vals = ['-8888', '-9999'] conv_header = {'release_time': _ctime(strformat='HHMM'), 'number_levels': int, 'latitude': _clatlon, 'longitude': _clatlon} return {'body': {'names': names_body, 'colspecs': colspecs_body, 'converters': conv_body, 'na_values': na_vals, 'index_col': False}, 'header': {'names': names_header, 'colspecs': colspecs_header, 'converters': conv_header, 'na_values': na_vals, 'index_col': False}}
python
def _get_fwf_params(self): """Produce a dictionary with names, colspecs, and dtype for IGRA2 data. Returns a dict with entries 'body' and 'header'. """ def _cdec(power=1): """Make a function to convert string 'value*10^power' to float.""" def _cdec_power(val): if val in ['-9999', '-8888', '-99999']: return np.nan else: return float(val) / 10**power return _cdec_power def _cflag(val): """Replace alphabetic flags A and B with numeric.""" if val == 'A': return 1 elif val == 'B': return 2 else: return 0 def _ctime(strformat='MMMSS'): """Return a function converting a string from MMMSS or HHMM to seconds.""" def _ctime_strformat(val): time = val.strip().zfill(5) if int(time) < 0: return np.nan elif int(time) == 9999: return np.nan else: if strformat == 'MMMSS': minutes = int(time[0:3]) seconds = int(time[3:5]) time_seconds = minutes * 60 + seconds elif strformat == 'HHMM': hours = int(time[0:2]) minutes = int(time[2:4]) time_seconds = hours * 3600 + minutes * 60 else: sys.exit('Unrecognized time format') return time_seconds return _ctime_strformat def _clatlon(x): n = len(x) deg = x[0:n - 4] dec = x[n - 4:] return float(deg + '.' + dec) if self.suffix == '-drvd.txt': names_body = ['pressure', 'reported_height', 'calculated_height', 'temperature', 'temperature_gradient', 'potential_temperature', 'potential_temperature_gradient', 'virtual_temperature', 'virtual_potential_temperature', 'vapor_pressure', 'saturation_vapor_pressure', 'reported_relative_humidity', 'calculated_relative_humidity', 'u_wind', 'u_wind_gradient', 'v_wind', 'v_wind_gradient', 'refractive_index'] colspecs_body = [(0, 7), (8, 15), (16, 23), (24, 31), (32, 39), (40, 47), (48, 55), (56, 63), (64, 71), (72, 79), (80, 87), (88, 95), (96, 103), (104, 111), (112, 119), (120, 127), (128, 135), (137, 143), (144, 151)] conv_body = {'pressure': _cdec(power=2), 'reported_height': int, 'calculated_height': int, 'temperature': _cdec(), 'temperature_gradient': _cdec(), 'potential_temperature': _cdec(), 'potential_temperature_gradient': _cdec(), 'virtual_temperature': _cdec(), 'virtual_potential_temperature': _cdec(), 'vapor_pressure': _cdec(power=3), 'saturation_vapor_pressure': _cdec(power=3), 'reported_relative_humidity': _cdec(), 'calculated_relative_humidity': _cdec(), 'u_wind': _cdec(), 'u_wind_gradient': _cdec(), 'v_wind': _cdec(), 'v_wind_gradient': _cdec(), 'refractive_index': int} names_header = ['site_id', 'year', 'month', 'day', 'hour', 'release_time', 'number_levels', 'precipitable_water', 'inv_pressure', 'inv_height', 'inv_strength', 'mixed_layer_pressure', 'mixed_layer_height', 'freezing_point_pressure', 'freezing_point_height', 'lcl_pressure', 'lcl_height', 'lfc_pressure', 'lfc_height', 'lnb_pressure', 'lnb_height', 'lifted_index', 'showalter_index', 'k_index', 'total_totals_index', 'cape', 'convective_inhibition'] colspecs_header = [(1, 12), (13, 17), (18, 20), (21, 23), (24, 26), (27, 31), (31, 36), (37, 43), (43, 48), (49, 55), (55, 61), (61, 67), (67, 73), (73, 79), (79, 85), (85, 91), (91, 97), (97, 103), (103, 109), (109, 115), (115, 121), (121, 127), (127, 133), (133, 139), (139, 145), (145, 151), (151, 157)] conv_header = {'site_id': str, 'year': int, 'month': int, 'day': int, 'hour': int, 'release_time': _ctime(strformat='HHMM'), 'number_levels': int, 'precipitable_water': _cdec(power=2), 'inv_pressure': _cdec(power=2), 'inv_height': int, 'inv_strength': _cdec(), 'mixed_layer_pressure': _cdec(power=2), 'mixed_layer_height': int, 'freezing_point_pressure': _cdec(power=2), 'freezing_point_height': int, 'lcl_pressure': 
_cdec(power=2), 'lcl_height': int, 'lfc_pressure': _cdec(power=2), 'lfc_height': int, 'lnb_pressure': _cdec(power=2), 'lnb_height': int, 'lifted_index': int, 'showalter_index': int, 'k_index': int, 'total_totals_index': int, 'cape': int, 'convective_inhibition': int} na_vals = ['-99999'] else: names_body = ['lvltyp1', 'lvltyp2', 'etime', 'pressure', 'pflag', 'height', 'zflag', 'temperature', 'tflag', 'relative_humidity', 'dewpoint_depression', 'direction', 'speed'] colspecs_body = [(0, 1), (1, 2), (3, 8), (9, 15), (15, 16), (16, 21), (21, 22), (22, 27), (27, 28), (28, 33), (34, 39), (40, 45), (46, 51)] conv_body = {'lvltyp1': int, 'lvltyp2': int, 'etime': _ctime(strformat='MMMSS'), 'pressure': _cdec(power=2), 'pflag': _cflag, 'height': int, 'zflag': _cflag, 'temperature': _cdec(), 'tflag': _cflag, 'relative_humidity': _cdec(), 'dewpoint_depression': _cdec(), 'direction': int, 'speed': _cdec()} names_header = ['site_id', 'year', 'month', 'day', 'hour', 'release_time', 'number_levels', 'pressure_source_code', 'non_pressure_source_code', 'latitude', 'longitude'] colspecs_header = [(1, 12), (13, 17), (18, 20), (21, 23), (24, 26), (27, 31), (32, 36), (37, 45), (46, 54), (55, 62), (63, 71)] na_vals = ['-8888', '-9999'] conv_header = {'release_time': _ctime(strformat='HHMM'), 'number_levels': int, 'latitude': _clatlon, 'longitude': _clatlon} return {'body': {'names': names_body, 'colspecs': colspecs_body, 'converters': conv_body, 'na_values': na_vals, 'index_col': False}, 'header': {'names': names_header, 'colspecs': colspecs_header, 'converters': conv_header, 'na_values': na_vals, 'index_col': False}}
[ "def", "_get_fwf_params", "(", "self", ")", ":", "def", "_cdec", "(", "power", "=", "1", ")", ":", "\"\"\"Make a function to convert string 'value*10^power' to float.\"\"\"", "def", "_cdec_power", "(", "val", ")", ":", "if", "val", "in", "[", "'-9999'", ",", "'-8888'", ",", "'-99999'", "]", ":", "return", "np", ".", "nan", "else", ":", "return", "float", "(", "val", ")", "/", "10", "**", "power", "return", "_cdec_power", "def", "_cflag", "(", "val", ")", ":", "\"\"\"Replace alphabetic flags A and B with numeric.\"\"\"", "if", "val", "==", "'A'", ":", "return", "1", "elif", "val", "==", "'B'", ":", "return", "2", "else", ":", "return", "0", "def", "_ctime", "(", "strformat", "=", "'MMMSS'", ")", ":", "\"\"\"Return a function converting a string from MMMSS or HHMM to seconds.\"\"\"", "def", "_ctime_strformat", "(", "val", ")", ":", "time", "=", "val", ".", "strip", "(", ")", ".", "zfill", "(", "5", ")", "if", "int", "(", "time", ")", "<", "0", ":", "return", "np", ".", "nan", "elif", "int", "(", "time", ")", "==", "9999", ":", "return", "np", ".", "nan", "else", ":", "if", "strformat", "==", "'MMMSS'", ":", "minutes", "=", "int", "(", "time", "[", "0", ":", "3", "]", ")", "seconds", "=", "int", "(", "time", "[", "3", ":", "5", "]", ")", "time_seconds", "=", "minutes", "*", "60", "+", "seconds", "elif", "strformat", "==", "'HHMM'", ":", "hours", "=", "int", "(", "time", "[", "0", ":", "2", "]", ")", "minutes", "=", "int", "(", "time", "[", "2", ":", "4", "]", ")", "time_seconds", "=", "hours", "*", "3600", "+", "minutes", "*", "60", "else", ":", "sys", ".", "exit", "(", "'Unrecognized time format'", ")", "return", "time_seconds", "return", "_ctime_strformat", "def", "_clatlon", "(", "x", ")", ":", "n", "=", "len", "(", "x", ")", "deg", "=", "x", "[", "0", ":", "n", "-", "4", "]", "dec", "=", "x", "[", "n", "-", "4", ":", "]", "return", "float", "(", "deg", "+", "'.'", "+", "dec", ")", "if", "self", ".", "suffix", "==", "'-drvd.txt'", ":", "names_body", "=", "[", "'pressure'", ",", "'reported_height'", ",", "'calculated_height'", ",", "'temperature'", ",", "'temperature_gradient'", ",", "'potential_temperature'", ",", "'potential_temperature_gradient'", ",", "'virtual_temperature'", ",", "'virtual_potential_temperature'", ",", "'vapor_pressure'", ",", "'saturation_vapor_pressure'", ",", "'reported_relative_humidity'", ",", "'calculated_relative_humidity'", ",", "'u_wind'", ",", "'u_wind_gradient'", ",", "'v_wind'", ",", "'v_wind_gradient'", ",", "'refractive_index'", "]", "colspecs_body", "=", "[", "(", "0", ",", "7", ")", ",", "(", "8", ",", "15", ")", ",", "(", "16", ",", "23", ")", ",", "(", "24", ",", "31", ")", ",", "(", "32", ",", "39", ")", ",", "(", "40", ",", "47", ")", ",", "(", "48", ",", "55", ")", ",", "(", "56", ",", "63", ")", ",", "(", "64", ",", "71", ")", ",", "(", "72", ",", "79", ")", ",", "(", "80", ",", "87", ")", ",", "(", "88", ",", "95", ")", ",", "(", "96", ",", "103", ")", ",", "(", "104", ",", "111", ")", ",", "(", "112", ",", "119", ")", ",", "(", "120", ",", "127", ")", ",", "(", "128", ",", "135", ")", ",", "(", "137", ",", "143", ")", ",", "(", "144", ",", "151", ")", "]", "conv_body", "=", "{", "'pressure'", ":", "_cdec", "(", "power", "=", "2", ")", ",", "'reported_height'", ":", "int", ",", "'calculated_height'", ":", "int", ",", "'temperature'", ":", "_cdec", "(", ")", ",", "'temperature_gradient'", ":", "_cdec", "(", ")", ",", "'potential_temperature'", ":", "_cdec", "(", ")", ",", "'potential_temperature_gradient'", ":", "_cdec", "(", ")", ",", 
"'virtual_temperature'", ":", "_cdec", "(", ")", ",", "'virtual_potential_temperature'", ":", "_cdec", "(", ")", ",", "'vapor_pressure'", ":", "_cdec", "(", "power", "=", "3", ")", ",", "'saturation_vapor_pressure'", ":", "_cdec", "(", "power", "=", "3", ")", ",", "'reported_relative_humidity'", ":", "_cdec", "(", ")", ",", "'calculated_relative_humidity'", ":", "_cdec", "(", ")", ",", "'u_wind'", ":", "_cdec", "(", ")", ",", "'u_wind_gradient'", ":", "_cdec", "(", ")", ",", "'v_wind'", ":", "_cdec", "(", ")", ",", "'v_wind_gradient'", ":", "_cdec", "(", ")", ",", "'refractive_index'", ":", "int", "}", "names_header", "=", "[", "'site_id'", ",", "'year'", ",", "'month'", ",", "'day'", ",", "'hour'", ",", "'release_time'", ",", "'number_levels'", ",", "'precipitable_water'", ",", "'inv_pressure'", ",", "'inv_height'", ",", "'inv_strength'", ",", "'mixed_layer_pressure'", ",", "'mixed_layer_height'", ",", "'freezing_point_pressure'", ",", "'freezing_point_height'", ",", "'lcl_pressure'", ",", "'lcl_height'", ",", "'lfc_pressure'", ",", "'lfc_height'", ",", "'lnb_pressure'", ",", "'lnb_height'", ",", "'lifted_index'", ",", "'showalter_index'", ",", "'k_index'", ",", "'total_totals_index'", ",", "'cape'", ",", "'convective_inhibition'", "]", "colspecs_header", "=", "[", "(", "1", ",", "12", ")", ",", "(", "13", ",", "17", ")", ",", "(", "18", ",", "20", ")", ",", "(", "21", ",", "23", ")", ",", "(", "24", ",", "26", ")", ",", "(", "27", ",", "31", ")", ",", "(", "31", ",", "36", ")", ",", "(", "37", ",", "43", ")", ",", "(", "43", ",", "48", ")", ",", "(", "49", ",", "55", ")", ",", "(", "55", ",", "61", ")", ",", "(", "61", ",", "67", ")", ",", "(", "67", ",", "73", ")", ",", "(", "73", ",", "79", ")", ",", "(", "79", ",", "85", ")", ",", "(", "85", ",", "91", ")", ",", "(", "91", ",", "97", ")", ",", "(", "97", ",", "103", ")", ",", "(", "103", ",", "109", ")", ",", "(", "109", ",", "115", ")", ",", "(", "115", ",", "121", ")", ",", "(", "121", ",", "127", ")", ",", "(", "127", ",", "133", ")", ",", "(", "133", ",", "139", ")", ",", "(", "139", ",", "145", ")", ",", "(", "145", ",", "151", ")", ",", "(", "151", ",", "157", ")", "]", "conv_header", "=", "{", "'site_id'", ":", "str", ",", "'year'", ":", "int", ",", "'month'", ":", "int", ",", "'day'", ":", "int", ",", "'hour'", ":", "int", ",", "'release_time'", ":", "_ctime", "(", "strformat", "=", "'HHMM'", ")", ",", "'number_levels'", ":", "int", ",", "'precipitable_water'", ":", "_cdec", "(", "power", "=", "2", ")", ",", "'inv_pressure'", ":", "_cdec", "(", "power", "=", "2", ")", ",", "'inv_height'", ":", "int", ",", "'inv_strength'", ":", "_cdec", "(", ")", ",", "'mixed_layer_pressure'", ":", "_cdec", "(", "power", "=", "2", ")", ",", "'mixed_layer_height'", ":", "int", ",", "'freezing_point_pressure'", ":", "_cdec", "(", "power", "=", "2", ")", ",", "'freezing_point_height'", ":", "int", ",", "'lcl_pressure'", ":", "_cdec", "(", "power", "=", "2", ")", ",", "'lcl_height'", ":", "int", ",", "'lfc_pressure'", ":", "_cdec", "(", "power", "=", "2", ")", ",", "'lfc_height'", ":", "int", ",", "'lnb_pressure'", ":", "_cdec", "(", "power", "=", "2", ")", ",", "'lnb_height'", ":", "int", ",", "'lifted_index'", ":", "int", ",", "'showalter_index'", ":", "int", ",", "'k_index'", ":", "int", ",", "'total_totals_index'", ":", "int", ",", "'cape'", ":", "int", ",", "'convective_inhibition'", ":", "int", "}", "na_vals", "=", "[", "'-99999'", "]", "else", ":", "names_body", "=", "[", "'lvltyp1'", ",", "'lvltyp2'", ",", "'etime'", ",", "'pressure'", 
",", "'pflag'", ",", "'height'", ",", "'zflag'", ",", "'temperature'", ",", "'tflag'", ",", "'relative_humidity'", ",", "'dewpoint_depression'", ",", "'direction'", ",", "'speed'", "]", "colspecs_body", "=", "[", "(", "0", ",", "1", ")", ",", "(", "1", ",", "2", ")", ",", "(", "3", ",", "8", ")", ",", "(", "9", ",", "15", ")", ",", "(", "15", ",", "16", ")", ",", "(", "16", ",", "21", ")", ",", "(", "21", ",", "22", ")", ",", "(", "22", ",", "27", ")", ",", "(", "27", ",", "28", ")", ",", "(", "28", ",", "33", ")", ",", "(", "34", ",", "39", ")", ",", "(", "40", ",", "45", ")", ",", "(", "46", ",", "51", ")", "]", "conv_body", "=", "{", "'lvltyp1'", ":", "int", ",", "'lvltyp2'", ":", "int", ",", "'etime'", ":", "_ctime", "(", "strformat", "=", "'MMMSS'", ")", ",", "'pressure'", ":", "_cdec", "(", "power", "=", "2", ")", ",", "'pflag'", ":", "_cflag", ",", "'height'", ":", "int", ",", "'zflag'", ":", "_cflag", ",", "'temperature'", ":", "_cdec", "(", ")", ",", "'tflag'", ":", "_cflag", ",", "'relative_humidity'", ":", "_cdec", "(", ")", ",", "'dewpoint_depression'", ":", "_cdec", "(", ")", ",", "'direction'", ":", "int", ",", "'speed'", ":", "_cdec", "(", ")", "}", "names_header", "=", "[", "'site_id'", ",", "'year'", ",", "'month'", ",", "'day'", ",", "'hour'", ",", "'release_time'", ",", "'number_levels'", ",", "'pressure_source_code'", ",", "'non_pressure_source_code'", ",", "'latitude'", ",", "'longitude'", "]", "colspecs_header", "=", "[", "(", "1", ",", "12", ")", ",", "(", "13", ",", "17", ")", ",", "(", "18", ",", "20", ")", ",", "(", "21", ",", "23", ")", ",", "(", "24", ",", "26", ")", ",", "(", "27", ",", "31", ")", ",", "(", "32", ",", "36", ")", ",", "(", "37", ",", "45", ")", ",", "(", "46", ",", "54", ")", ",", "(", "55", ",", "62", ")", ",", "(", "63", ",", "71", ")", "]", "na_vals", "=", "[", "'-8888'", ",", "'-9999'", "]", "conv_header", "=", "{", "'release_time'", ":", "_ctime", "(", "strformat", "=", "'HHMM'", ")", ",", "'number_levels'", ":", "int", ",", "'latitude'", ":", "_clatlon", ",", "'longitude'", ":", "_clatlon", "}", "return", "{", "'body'", ":", "{", "'names'", ":", "names_body", ",", "'colspecs'", ":", "colspecs_body", ",", "'converters'", ":", "conv_body", ",", "'na_values'", ":", "na_vals", ",", "'index_col'", ":", "False", "}", ",", "'header'", ":", "{", "'names'", ":", "names_header", ",", "'colspecs'", ":", "colspecs_header", ",", "'converters'", ":", "conv_header", ",", "'na_values'", ":", "na_vals", ",", "'index_col'", ":", "False", "}", "}" ]
Produce a dictionary with names, colspecs, and dtype for IGRA2 data. Returns a dict with entries 'body' and 'header'.
[ "Produce", "a", "dictionary", "with", "names", "colspecs", "and", "dtype", "for", "IGRA2", "data", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/simplewebservice/igra2.py#L176-L356
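The converter factories above (_cdec, _ctime) are closures: each call bakes a scale factor or format into a per-field function that read_fwf then applies to every raw string. A condensed version of that pattern, with a hypothetical name to avoid clashing with the original.

import numpy as np

def make_decimal_converter(power=1):
    """Return a converter mapping 'value * 10**power' strings to floats (NaN for sentinels)."""
    def convert(val):
        return np.nan if val in ('-9999', '-8888', '-99999') else float(val) / 10**power
    return convert

to_hpa = make_decimal_converter(power=2)
print(to_hpa('101325'))  # 1013.25 -- a pressure field stored in Pa, rescaled to hPa
print(to_hpa('-9999'))   # nan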
Unidata/siphon
siphon/simplewebservice/igra2.py
IGRAUpperAir._clean_body_df
def _clean_body_df(self, df):
    """Format the dataframe, remove empty rows, and add units attribute."""
    if self.suffix == '-drvd.txt':
        df = df.dropna(subset=('temperature', 'reported_relative_humidity',
                               'u_wind', 'v_wind'),
                       how='all').reset_index(drop=True)

        df.units = {'pressure': 'hPa',
                    'reported_height': 'meter',
                    'calculated_height': 'meter',
                    'temperature': 'Kelvin',
                    'temperature_gradient': 'Kelvin / kilometer',
                    'potential_temperature': 'Kelvin',
                    'potential_temperature_gradient': 'Kelvin / kilometer',
                    'virtual_temperature': 'Kelvin',
                    'virtual_potential_temperature': 'Kelvin',
                    'vapor_pressure': 'Pascal',
                    'saturation_vapor_pressure': 'Pascal',
                    'reported_relative_humidity': 'percent',
                    'calculated_relative_humidity': 'percent',
                    'u_wind': 'meter / second',
                    'u_wind_gradient': '(meter / second) / kilometer',
                    'v_wind': 'meter / second',
                    'v_wind_gradient': '(meter / second) / kilometer',
                    'refractive_index': 'unitless'}
    else:
        df['u_wind'], df['v_wind'] = get_wind_components(df['speed'],
                                                         np.deg2rad(df['direction']))
        df['u_wind'] = np.round(df['u_wind'], 1)
        df['v_wind'] = np.round(df['v_wind'], 1)

        df = df.dropna(subset=('temperature', 'direction', 'speed',
                               'dewpoint_depression', 'u_wind', 'v_wind'),
                       how='all').reset_index(drop=True)

        df['dewpoint'] = df['temperature'] - df['dewpoint_depression']
        df.drop('dewpoint_depression', axis=1, inplace=True)

        df.units = {'etime': 'second',
                    'pressure': 'hPa',
                    'height': 'meter',
                    'temperature': 'degC',
                    'dewpoint': 'degC',
                    'direction': 'degrees',
                    'speed': 'meter / second',
                    'u_wind': 'meter / second',
                    'v_wind': 'meter / second'}

    return df
python
def _clean_body_df(self, df):
    """Format the dataframe, remove empty rows, and add units attribute."""
    if self.suffix == '-drvd.txt':
        df = df.dropna(subset=('temperature', 'reported_relative_humidity',
                               'u_wind', 'v_wind'),
                       how='all').reset_index(drop=True)

        df.units = {'pressure': 'hPa',
                    'reported_height': 'meter',
                    'calculated_height': 'meter',
                    'temperature': 'Kelvin',
                    'temperature_gradient': 'Kelvin / kilometer',
                    'potential_temperature': 'Kelvin',
                    'potential_temperature_gradient': 'Kelvin / kilometer',
                    'virtual_temperature': 'Kelvin',
                    'virtual_potential_temperature': 'Kelvin',
                    'vapor_pressure': 'Pascal',
                    'saturation_vapor_pressure': 'Pascal',
                    'reported_relative_humidity': 'percent',
                    'calculated_relative_humidity': 'percent',
                    'u_wind': 'meter / second',
                    'u_wind_gradient': '(meter / second) / kilometer',
                    'v_wind': 'meter / second',
                    'v_wind_gradient': '(meter / second) / kilometer',
                    'refractive_index': 'unitless'}
    else:
        df['u_wind'], df['v_wind'] = get_wind_components(df['speed'],
                                                         np.deg2rad(df['direction']))
        df['u_wind'] = np.round(df['u_wind'], 1)
        df['v_wind'] = np.round(df['v_wind'], 1)

        df = df.dropna(subset=('temperature', 'direction', 'speed',
                               'dewpoint_depression', 'u_wind', 'v_wind'),
                       how='all').reset_index(drop=True)

        df['dewpoint'] = df['temperature'] - df['dewpoint_depression']
        df.drop('dewpoint_depression', axis=1, inplace=True)

        df.units = {'etime': 'second',
                    'pressure': 'hPa',
                    'height': 'meter',
                    'temperature': 'degC',
                    'dewpoint': 'degC',
                    'direction': 'degrees',
                    'speed': 'meter / second',
                    'u_wind': 'meter / second',
                    'v_wind': 'meter / second'}

    return df
[ "def", "_clean_body_df", "(", "self", ",", "df", ")", ":", "if", "self", ".", "suffix", "==", "'-drvd.txt'", ":", "df", "=", "df", ".", "dropna", "(", "subset", "=", "(", "'temperature'", ",", "'reported_relative_humidity'", ",", "'u_wind'", ",", "'v_wind'", ")", ",", "how", "=", "'all'", ")", ".", "reset_index", "(", "drop", "=", "True", ")", "df", ".", "units", "=", "{", "'pressure'", ":", "'hPa'", ",", "'reported_height'", ":", "'meter'", ",", "'calculated_height'", ":", "'meter'", ",", "'temperature'", ":", "'Kelvin'", ",", "'temperature_gradient'", ":", "'Kelvin / kilometer'", ",", "'potential_temperature'", ":", "'Kelvin'", ",", "'potential_temperature_gradient'", ":", "'Kelvin / kilometer'", ",", "'virtual_temperature'", ":", "'Kelvin'", ",", "'virtual_potential_temperature'", ":", "'Kelvin'", ",", "'vapor_pressure'", ":", "'Pascal'", ",", "'saturation_vapor_pressure'", ":", "'Pascal'", ",", "'reported_relative_humidity'", ":", "'percent'", ",", "'calculated_relative_humidity'", ":", "'percent'", ",", "'u_wind'", ":", "'meter / second'", ",", "'u_wind_gradient'", ":", "'(meter / second) / kilometer)'", ",", "'v_wind'", ":", "'meter / second'", ",", "'v_wind_gradient'", ":", "'(meter / second) / kilometer)'", ",", "'refractive_index'", ":", "'unitless'", "}", "else", ":", "df", "[", "'u_wind'", "]", ",", "df", "[", "'v_wind'", "]", "=", "get_wind_components", "(", "df", "[", "'speed'", "]", ",", "np", ".", "deg2rad", "(", "df", "[", "'direction'", "]", ")", ")", "df", "[", "'u_wind'", "]", "=", "np", ".", "round", "(", "df", "[", "'u_wind'", "]", ",", "1", ")", "df", "[", "'v_wind'", "]", "=", "np", ".", "round", "(", "df", "[", "'v_wind'", "]", ",", "1", ")", "df", "=", "df", ".", "dropna", "(", "subset", "=", "(", "'temperature'", ",", "'direction'", ",", "'speed'", ",", "'dewpoint_depression'", ",", "'u_wind'", ",", "'v_wind'", ")", ",", "how", "=", "'all'", ")", ".", "reset_index", "(", "drop", "=", "True", ")", "df", "[", "'dewpoint'", "]", "=", "df", "[", "'temperature'", "]", "-", "df", "[", "'dewpoint_depression'", "]", "df", ".", "drop", "(", "'dewpoint_depression'", ",", "axis", "=", "1", ",", "inplace", "=", "True", ")", "df", ".", "units", "=", "{", "'etime'", ":", "'second'", ",", "'pressure'", ":", "'hPa'", ",", "'height'", ":", "'meter'", ",", "'temperature'", ":", "'degC'", ",", "'dewpoint'", ":", "'degC'", ",", "'direction'", ":", "'degrees'", ",", "'speed'", ":", "'meter / second'", ",", "'u_wind'", ":", "'meter / second'", ",", "'v_wind'", ":", "'meter / second'", "}", "return", "df" ]
Format the dataframe, remove empty rows, and add units attribute.
[ "Format", "the", "dataframe", "remove", "empty", "rows", "and", "add", "units", "attribute", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/simplewebservice/igra2.py#L358-L407
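Two of the transforms above, outside the class, to show the arithmetic: dewpoint is temperature minus the dewpoint depression, and u/v come from speed and direction (converted to radians first).

import numpy as np
import pandas as pd

df = pd.DataFrame({'temperature': [15.0], 'dewpoint_depression': [2.5],
                   'speed': [10.0], 'direction': [270]})
df['dewpoint'] = df['temperature'] - df['dewpoint_depression']   # 12.5 degC
rad = np.deg2rad(df['direction'])
df['u_wind'] = np.round(-df['speed'] * np.sin(rad), 1)           # 10.0 (westerly)
df['v_wind'] = np.round(-df['speed'] * np.cos(rad), 1)           # 0.0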
Unidata/siphon
siphon/simplewebservice/igra2.py
IGRAUpperAir._clean_header_df
def _clean_header_df(self, df): """Format the header dataframe and add units.""" if self.suffix == '-drvd.txt': df.units = {'release_time': 'second', 'precipitable_water': 'millimeter', 'inv_pressure': 'hPa', 'inv_height': 'meter', 'inv_strength': 'Kelvin', 'mixed_layer_pressure': 'hPa', 'mixed_layer_height': 'meter', 'freezing_point_pressure': 'hPa', 'freezing_point_height': 'meter', 'lcl_pressure': 'hPa', 'lcl_height': 'meter', 'lfc_pressure': 'hPa', 'lfc_height': 'meter', 'lnb_pressure': 'hPa', 'lnb_height': 'meter', 'lifted_index': 'degC', 'showalter_index': 'degC', 'k_index': 'degC', 'total_totals_index': 'degC', 'cape': 'Joule / kilogram', 'convective_inhibition': 'Joule / kilogram'} else: df.units = {'release_time': 'second', 'latitude': 'degrees', 'longitude': 'degrees'} return df
python
def _clean_header_df(self, df): """Format the header dataframe and add units.""" if self.suffix == '-drvd.txt': df.units = {'release_time': 'second', 'precipitable_water': 'millimeter', 'inv_pressure': 'hPa', 'inv_height': 'meter', 'inv_strength': 'Kelvin', 'mixed_layer_pressure': 'hPa', 'mixed_layer_height': 'meter', 'freezing_point_pressure': 'hPa', 'freezing_point_height': 'meter', 'lcl_pressure': 'hPa', 'lcl_height': 'meter', 'lfc_pressure': 'hPa', 'lfc_height': 'meter', 'lnb_pressure': 'hPa', 'lnb_height': 'meter', 'lifted_index': 'degC', 'showalter_index': 'degC', 'k_index': 'degC', 'total_totals_index': 'degC', 'cape': 'Joule / kilogram', 'convective_inhibition': 'Joule / kilogram'} else: df.units = {'release_time': 'second', 'latitude': 'degrees', 'longitude': 'degrees'} return df
[ "def", "_clean_header_df", "(", "self", ",", "df", ")", ":", "if", "self", ".", "suffix", "==", "'-drvd.txt'", ":", "df", ".", "units", "=", "{", "'release_time'", ":", "'second'", ",", "'precipitable_water'", ":", "'millimeter'", ",", "'inv_pressure'", ":", "'hPa'", ",", "'inv_height'", ":", "'meter'", ",", "'inv_strength'", ":", "'Kelvin'", ",", "'mixed_layer_pressure'", ":", "'hPa'", ",", "'mixed_layer_height'", ":", "'meter'", ",", "'freezing_point_pressure'", ":", "'hPa'", ",", "'freezing_point_height'", ":", "'meter'", ",", "'lcl_pressure'", ":", "'hPa'", ",", "'lcl_height'", ":", "'meter'", ",", "'lfc_pressure'", ":", "'hPa'", ",", "'lfc_height'", ":", "'meter'", ",", "'lnb_pressure'", ":", "'hPa'", ",", "'lnb_height'", ":", "'meter'", ",", "'lifted_index'", ":", "'degC'", ",", "'showalter_index'", ":", "'degC'", ",", "'k_index'", ":", "'degC'", ",", "'total_totals_index'", ":", "'degC'", ",", "'cape'", ":", "'Joule / kilogram'", ",", "'convective_inhibition'", ":", "'Joule / kilogram'", "}", "else", ":", "df", ".", "units", "=", "{", "'release_time'", ":", "'second'", ",", "'latitude'", ":", "'degrees'", ",", "'longitude'", ":", "'degrees'", "}", "return", "df" ]
Format the header dataframe and add units.
[ "Format", "the", "header", "dataframe", "and", "add", "units", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/simplewebservice/igra2.py#L409-L439
Unidata/siphon
siphon/simplewebservice/ndbc.py
NDBC.realtime_observations
def realtime_observations(cls, buoy, data_type='txt'):
    """Retrieve the realtime buoy data from NDBC.

    Parameters
    ----------
    buoy : str
        Name of buoy
    data_type : str
        Type of data requested, must be one of
        'txt' standard meteorological data
        'drift' meteorological data from drifting buoys and limited moored buoy data
        mainly from international partners
        'cwind' continuous winds data (10 minute average)
        'spec' spectral wave summaries
        'ocean' oceanographic data
        'srad' solar radiation data
        'dart' water column height
        'supl' supplemental measurements data
        'rain' hourly rain data

    Returns
    -------
    :class:`pandas.DataFrame` containing the parsed data

    """
    endpoint = cls()
    parsers = {'txt': endpoint._parse_met,
               'drift': endpoint._parse_drift,
               'cwind': endpoint._parse_cwind,
               'spec': endpoint._parse_spec,
               'ocean': endpoint._parse_ocean,
               'srad': endpoint._parse_srad,
               'dart': endpoint._parse_dart,
               'supl': endpoint._parse_supl,
               'rain': endpoint._parse_rain}

    if data_type not in parsers:
        raise KeyError('Data type must be txt, drift, cwind, spec, ocean, srad, dart, '
                       'supl, or rain for parsed realtime data.')

    raw_data = endpoint.raw_buoy_data(buoy, data_type=data_type)
    return parsers[data_type](raw_data)
python
def realtime_observations(cls, buoy, data_type='txt'):
    """Retrieve the realtime buoy data from NDBC.

    Parameters
    ----------
    buoy : str
        Name of buoy
    data_type : str
        Type of data requested, must be one of
        'txt' standard meteorological data
        'drift' meteorological data from drifting buoys and limited moored buoy data
        mainly from international partners
        'cwind' continuous winds data (10 minute average)
        'spec' spectral wave summaries
        'ocean' oceanographic data
        'srad' solar radiation data
        'dart' water column height
        'supl' supplemental measurements data
        'rain' hourly rain data

    Returns
    -------
    :class:`pandas.DataFrame` containing the parsed data

    """
    endpoint = cls()
    parsers = {'txt': endpoint._parse_met,
               'drift': endpoint._parse_drift,
               'cwind': endpoint._parse_cwind,
               'spec': endpoint._parse_spec,
               'ocean': endpoint._parse_ocean,
               'srad': endpoint._parse_srad,
               'dart': endpoint._parse_dart,
               'supl': endpoint._parse_supl,
               'rain': endpoint._parse_rain}

    if data_type not in parsers:
        raise KeyError('Data type must be txt, drift, cwind, spec, ocean, srad, dart, '
                       'supl, or rain for parsed realtime data.')

    raw_data = endpoint.raw_buoy_data(buoy, data_type=data_type)
    return parsers[data_type](raw_data)
[ "def", "realtime_observations", "(", "cls", ",", "buoy", ",", "data_type", "=", "'txt'", ")", ":", "endpoint", "=", "cls", "(", ")", "parsers", "=", "{", "'txt'", ":", "endpoint", ".", "_parse_met", ",", "'drift'", ":", "endpoint", ".", "_parse_drift", ",", "'cwind'", ":", "endpoint", ".", "_parse_cwind", ",", "'spec'", ":", "endpoint", ".", "_parse_spec", ",", "'ocean'", ":", "endpoint", ".", "_parse_ocean", ",", "'srad'", ":", "endpoint", ".", "_parse_srad", ",", "'dart'", ":", "endpoint", ".", "_parse_dart", ",", "'supl'", ":", "endpoint", ".", "_parse_supl", ",", "'rain'", ":", "endpoint", ".", "_parse_rain", "}", "if", "data_type", "not", "in", "parsers", ":", "raise", "KeyError", "(", "'Data type must be txt, drift, cwind, spec, ocean, srad, dart,'", "'supl, or rain for parsed realtime data.'", ")", "raw_data", "=", "endpoint", ".", "raw_buoy_data", "(", "buoy", ",", "data_type", "=", "data_type", ")", "return", "parsers", "[", "data_type", "]", "(", "raw_data", ")" ]
Retrieve the realtime buoy data from NDBC. Parameters ---------- buoy : str Name of buoy data_type : str Type of data requested, must be one of 'txt' standard meteorological data 'drift' meteorological data from drifting buoys and limited moored buoy data mainly from international partners 'cwind' continuous winds data (10 minute average) 'spec' spectral wave summaries 'ocean' oceanographic data 'srad' solar radiation data 'dart' water column height 'supl' supplemental measurements data 'rain' hourly rain data Returns ------- Raw data string
[ "Retrieve", "the", "realtime", "buoy", "data", "from", "NDBC", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/simplewebservice/ndbc.py#L26-L67
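A minimal usage sketch for the classmethod above, assuming network access and that buoy 41002 (an id chosen purely for illustration) is currently reporting standard meteorological data:

from siphon.simplewebservice.ndbc import NDBC

# Fetch and parse the latest standard meteorological file for one buoy;
# the result is a pandas DataFrame carrying a .units dictionary.
obs = NDBC.realtime_observations('41002', data_type='txt')
print(obs[['time', 'wind_speed', 'pressure']].head())
print(obs.units['wind_speed'])  # 'meters/second'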
Unidata/siphon
siphon/simplewebservice/ndbc.py
NDBC._parse_met
def _parse_met(content): """Parse standard meteorological data from NDBC buoys. Parameters ---------- content : str Data to parse Returns ------- :class:`pandas.DataFrame` containing the data """ col_names = ['year', 'month', 'day', 'hour', 'minute', 'wind_direction', 'wind_speed', 'wind_gust', 'wave_height', 'dominant_wave_period', 'average_wave_period', 'dominant_wave_direction', 'pressure', 'air_temperature', 'water_temperature', 'dewpoint', 'visibility', '3hr_pressure_tendency', 'water_level_above_mean'] col_units = {'wind_direction': 'degrees', 'wind_speed': 'meters/second', 'wind_gust': 'meters/second', 'wave_height': 'meters', 'dominant_wave_period': 'seconds', 'average_wave_period': 'seconds', 'dominant_wave_direction': 'degrees', 'pressure': 'hPa', 'air_temperature': 'degC', 'water_temperature': 'degC', 'dewpoint': 'degC', 'visibility': 'nautical_mile', '3hr_pressure_tendency': 'hPa', 'water_level_above_mean': 'feet', 'time': None} df = pd.read_table(StringIO(content), comment='#', na_values='MM', names=col_names, sep=r'\s+') df['time'] = pd.to_datetime(df[['year', 'month', 'day', 'hour', 'minute']], utc=True) df = df.drop(columns=['year', 'month', 'day', 'hour', 'minute']) df.units = col_units return df
python
def _parse_met(content): """Parse standard meteorological data from NDBC buoys. Parameters ---------- content : str Data to parse Returns ------- :class:`pandas.DataFrame` containing the data """ col_names = ['year', 'month', 'day', 'hour', 'minute', 'wind_direction', 'wind_speed', 'wind_gust', 'wave_height', 'dominant_wave_period', 'average_wave_period', 'dominant_wave_direction', 'pressure', 'air_temperature', 'water_temperature', 'dewpoint', 'visibility', '3hr_pressure_tendency', 'water_level_above_mean'] col_units = {'wind_direction': 'degrees', 'wind_speed': 'meters/second', 'wind_gust': 'meters/second', 'wave_height': 'meters', 'dominant_wave_period': 'seconds', 'average_wave_period': 'seconds', 'dominant_wave_direction': 'degrees', 'pressure': 'hPa', 'air_temperature': 'degC', 'water_temperature': 'degC', 'dewpoint': 'degC', 'visibility': 'nautical_mile', '3hr_pressure_tendency': 'hPa', 'water_level_above_mean': 'feet', 'time': None} df = pd.read_table(StringIO(content), comment='#', na_values='MM', names=col_names, sep=r'\s+') df['time'] = pd.to_datetime(df[['year', 'month', 'day', 'hour', 'minute']], utc=True) df = df.drop(columns=['year', 'month', 'day', 'hour', 'minute']) df.units = col_units return df
[ "def", "_parse_met", "(", "content", ")", ":", "col_names", "=", "[", "'year'", ",", "'month'", ",", "'day'", ",", "'hour'", ",", "'minute'", ",", "'wind_direction'", ",", "'wind_speed'", ",", "'wind_gust'", ",", "'wave_height'", ",", "'dominant_wave_period'", ",", "'average_wave_period'", ",", "'dominant_wave_direction'", ",", "'pressure'", ",", "'air_temperature'", ",", "'water_temperature'", ",", "'dewpoint'", ",", "'visibility'", ",", "'3hr_pressure_tendency'", ",", "'water_level_above_mean'", "]", "col_units", "=", "{", "'wind_direction'", ":", "'degrees'", ",", "'wind_speed'", ":", "'meters/second'", ",", "'wind_gust'", ":", "'meters/second'", ",", "'wave_height'", ":", "'meters'", ",", "'dominant_wave_period'", ":", "'seconds'", ",", "'average_wave_period'", ":", "'seconds'", ",", "'dominant_wave_direction'", ":", "'degrees'", ",", "'pressure'", ":", "'hPa'", ",", "'air_temperature'", ":", "'degC'", ",", "'water_temperature'", ":", "'degC'", ",", "'dewpoint'", ":", "'degC'", ",", "'visibility'", ":", "'nautical_mile'", ",", "'3hr_pressure_tendency'", ":", "'hPa'", ",", "'water_level_above_mean'", ":", "'feet'", ",", "'time'", ":", "None", "}", "df", "=", "pd", ".", "read_table", "(", "StringIO", "(", "content", ")", ",", "comment", "=", "'#'", ",", "na_values", "=", "'MM'", ",", "names", "=", "col_names", ",", "sep", "=", "r'\\s+'", ")", "df", "[", "'time'", "]", "=", "pd", ".", "to_datetime", "(", "df", "[", "[", "'year'", ",", "'month'", ",", "'day'", ",", "'hour'", ",", "'minute'", "]", "]", ",", "utc", "=", "True", ")", "df", "=", "df", ".", "drop", "(", "columns", "=", "[", "'year'", ",", "'month'", ",", "'day'", ",", "'hour'", ",", "'minute'", "]", ")", "df", ".", "units", "=", "col_units", "return", "df" ]
Parse standard meteorological data from NDBC buoys. Parameters ---------- content : str Data to parse Returns ------- :class:`pandas.DataFrame` containing the data
[ "Parse", "standard", "meteorological", "data", "from", "NDBC", "buoys", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/simplewebservice/ndbc.py#L70-L111
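Because `_parse_met` is a plain static helper, the column mapping above can be exercised offline; a sketch with a fabricated two-line sample in the realtime-file layout (the data row below is invented for illustration, not real NDBC output):

from siphon.simplewebservice.ndbc import NDBC

sample = ('#YY  MM DD hh mm WDIR WSPD GST  WVHT  DPD  APD MWD  PRES  ATMP  WTMP  DEWP  VIS PTDY  TIDE\n'
          '2019 01 01 00 50 200  7.0  9.0  1.2    7  5.0 190 1022.1 10.0  11.0   5.0   MM +1.0    MM\n')
frame = NDBC._parse_met(sample)
print(frame.loc[0, 'wind_speed'])  # 7.0; 'MM' fields come back as NaN
print(frame.loc[0, 'time'])        # 2019-01-01 00:50:00+00:00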
Unidata/siphon
siphon/simplewebservice/ndbc.py
NDBC._parse_supl
def _parse_supl(content): """Parse supplemental measurements data. Parameters ---------- content : str Data to parse Returns ------- :class:`pandas.DataFrame` containing the data """ col_names = ['year', 'month', 'day', 'hour', 'minute', 'hourly_low_pressure', 'hourly_low_pressure_time', 'hourly_high_wind', 'hourly_high_wind_direction', 'hourly_high_wind_time'] col_units = {'hourly_low_pressure': 'hPa', 'hourly_low_pressure_time': None, 'hourly_high_wind': 'meters/second', 'hourly_high_wind_direction': 'degrees', 'hourly_high_wind_time': None, 'time': None} df = pd.read_table(StringIO(content), comment='#', na_values='MM', names=col_names, sep=r'\s+') df['time'] = pd.to_datetime(df[['year', 'month', 'day', 'hour', 'minute']], utc=True) df['hours'] = np.floor(df['hourly_low_pressure_time'] / 100) df['minutes'] = df['hourly_low_pressure_time'] - df['hours'] * 100 df['hours'] = df['hours'].replace(99, np.nan) df['minutes'] = df['minutes'].replace(99, np.nan) df['hourly_low_pressure_time'] = pd.to_datetime(df[['year', 'month', 'day', 'hours', 'minutes']], utc=True) df['hours'] = np.floor(df['hourly_high_wind_time'] / 100) df['minutes'] = df['hourly_high_wind_time'] - df['hours'] * 100 df['hours'] = df['hours'].replace(99, np.nan) df['minutes'] = df['minutes'].replace(99, np.nan) df['hourly_high_wind_time'] = pd.to_datetime(df[['year', 'month', 'day', 'hours', 'minutes']], utc=True) df = df.drop(columns=['year', 'month', 'day', 'hour', 'minute', 'hours', 'minutes']) df.units = col_units return df
python
def _parse_supl(content): """Parse supplemental measurements data. Parameters ---------- content : str Data to parse Returns ------- :class:`pandas.DataFrame` containing the data """ col_names = ['year', 'month', 'day', 'hour', 'minute', 'hourly_low_pressure', 'hourly_low_pressure_time', 'hourly_high_wind', 'hourly_high_wind_direction', 'hourly_high_wind_time'] col_units = {'hourly_low_pressure': 'hPa', 'hourly_low_pressure_time': None, 'hourly_high_wind': 'meters/second', 'hourly_high_wind_direction': 'degrees', 'hourly_high_wind_time': None, 'time': None} df = pd.read_table(StringIO(content), comment='#', na_values='MM', names=col_names, sep=r'\s+') df['time'] = pd.to_datetime(df[['year', 'month', 'day', 'hour', 'minute']], utc=True) df['hours'] = np.floor(df['hourly_low_pressure_time'] / 100) df['minutes'] = df['hourly_low_pressure_time'] - df['hours'] * 100 df['hours'] = df['hours'].replace(99, np.nan) df['minutes'] = df['minutes'].replace(99, np.nan) df['hourly_low_pressure_time'] = pd.to_datetime(df[['year', 'month', 'day', 'hours', 'minutes']], utc=True) df['hours'] = np.floor(df['hourly_high_wind_time'] / 100) df['minutes'] = df['hourly_high_wind_time'] - df['hours'] * 100 df['hours'] = df['hours'].replace(99, np.nan) df['minutes'] = df['minutes'].replace(99, np.nan) df['hourly_high_wind_time'] = pd.to_datetime(df[['year', 'month', 'day', 'hours', 'minutes']], utc=True) df = df.drop(columns=['year', 'month', 'day', 'hour', 'minute', 'hours', 'minutes']) df.units = col_units return df
[ "def", "_parse_supl", "(", "content", ")", ":", "col_names", "=", "[", "'year'", ",", "'month'", ",", "'day'", ",", "'hour'", ",", "'minute'", ",", "'hourly_low_pressure'", ",", "'hourly_low_pressure_time'", ",", "'hourly_high_wind'", ",", "'hourly_high_wind_direction'", ",", "'hourly_high_wind_time'", "]", "col_units", "=", "{", "'hourly_low_pressure'", ":", "'hPa'", ",", "'hourly_low_pressure_time'", ":", "None", ",", "'hourly_high_wind'", ":", "'meters/second'", ",", "'hourly_high_wind_direction'", ":", "'degrees'", ",", "'hourly_high_wind_time'", ":", "None", ",", "'time'", ":", "None", "}", "df", "=", "pd", ".", "read_table", "(", "StringIO", "(", "content", ")", ",", "comment", "=", "'#'", ",", "na_values", "=", "'MM'", ",", "names", "=", "col_names", ",", "sep", "=", "r'\\s+'", ")", "df", "[", "'time'", "]", "=", "pd", ".", "to_datetime", "(", "df", "[", "[", "'year'", ",", "'month'", ",", "'day'", ",", "'hour'", ",", "'minute'", "]", "]", ",", "utc", "=", "True", ")", "df", "[", "'hours'", "]", "=", "np", ".", "floor", "(", "df", "[", "'hourly_low_pressure_time'", "]", "/", "100", ")", "df", "[", "'minutes'", "]", "=", "df", "[", "'hourly_low_pressure_time'", "]", "-", "df", "[", "'hours'", "]", "*", "100", "df", "[", "'hours'", "]", "=", "df", "[", "'hours'", "]", ".", "replace", "(", "99", ",", "np", ".", "nan", ")", "df", "[", "'minutes'", "]", "=", "df", "[", "'minutes'", "]", ".", "replace", "(", "99", ",", "np", ".", "nan", ")", "df", "[", "'hourly_low_pressure_time'", "]", "=", "pd", ".", "to_datetime", "(", "df", "[", "[", "'year'", ",", "'month'", ",", "'day'", ",", "'hours'", ",", "'minutes'", "]", "]", ",", "utc", "=", "True", ")", "df", "[", "'hours'", "]", "=", "np", ".", "floor", "(", "df", "[", "'hourly_high_wind_time'", "]", "/", "100", ")", "df", "[", "'minutes'", "]", "=", "df", "[", "'hourly_high_wind_time'", "]", "-", "df", "[", "'hours'", "]", "*", "100", "df", "[", "'hours'", "]", "=", "df", "[", "'hours'", "]", ".", "replace", "(", "99", ",", "np", ".", "nan", ")", "df", "[", "'minutes'", "]", "=", "df", "[", "'minutes'", "]", ".", "replace", "(", "99", ",", "np", ".", "nan", ")", "df", "[", "'hourly_high_wind_time'", "]", "=", "pd", ".", "to_datetime", "(", "df", "[", "[", "'year'", ",", "'month'", ",", "'day'", ",", "'hours'", ",", "'minutes'", "]", "]", ",", "utc", "=", "True", ")", "df", "=", "df", ".", "drop", "(", "columns", "=", "[", "'year'", ",", "'month'", ",", "'day'", ",", "'hour'", ",", "'minute'", ",", "'hours'", ",", "'minutes'", "]", ")", "df", ".", "units", "=", "col_units", "return", "df" ]
Parse supplemental measurements data. Parameters ---------- content : str Data to parse Returns ------- :class:`pandas.DataFrame` containing the data
[ "Parse", "supplemental", "measurements", "data", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/simplewebservice/ndbc.py#L369-L414
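The HHMM decoding used twice above (floor-divide by 100 for the hour, the remainder for the minute, with 99 as the missing-value sentinel) is easy to verify in isolation; a small sketch on a toy series rather than real NDBC content:

import numpy as np
import pandas as pd

hhmm = pd.Series([1350.0, 230.0, 9999.0])  # encoded times; 9999 means missing
hours = np.floor(hhmm / 100)               # 13.0, 2.0, 99.0
minutes = hhmm - hours * 100               # 50.0, 30.0, 99.0
hours = hours.replace(99, np.nan)          # mask the sentinel
minutes = minutes.replace(99, np.nan)
print(hours.tolist())    # [13.0, 2.0, nan]
print(minutes.tolist())  # [50.0, 30.0, nan]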
Unidata/siphon
siphon/simplewebservice/ndbc.py
NDBC._check_if_url_valid
def _check_if_url_valid(url): """Check if a url is valid (returns 200) or not. Parameters ---------- url : str URL to check Returns ------- bool if url is valid """ r = requests.head(url) if r.status_code == 200: return True else: return False
python
def _check_if_url_valid(url): """Check if a url is valid (returns 200) or not. Parameters ---------- url : str URL to check Returns ------- bool if url is valid """ r = requests.head(url) if r.status_code == 200: return True else: return False
[ "def", "_check_if_url_valid", "(", "url", ")", ":", "r", "=", "requests", ".", "head", "(", "url", ")", "if", "r", ".", "status_code", "==", "200", ":", "return", "True", "else", ":", "return", "False" ]
Check if a url is valid (returns 200) or not. Parameters ---------- url : str URL to check Returns ------- bool if url is valid
[ "Check", "if", "a", "url", "is", "valid", "(", "returns", "200", ")", "or", "not", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/simplewebservice/ndbc.py#L462-L479
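A standalone sketch of the same HEAD-request check, assuming network access; httpbin.org is used only as a stand-in target:

import requests

def url_exists(url):
    # HEAD fetches the status line without downloading the body.
    return requests.head(url).status_code == 200

print(url_exists('https://httpbin.org/status/200'))  # True
print(url_exists('https://httpbin.org/status/404'))  # False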
Unidata/siphon
siphon/simplewebservice/ndbc.py
NDBC.buoy_data_types
def buoy_data_types(cls, buoy): """Determine which types of data are available for a given buoy. Parameters ---------- buoy : str Buoy name Returns ------- dict of valid file extensions and their descriptions """ endpoint = cls() file_types = {'txt': 'standard meteorological data', 'drift': 'meteorological data from drifting buoys and limited moored ' 'buoy data mainly from international partners', 'cwind': 'continuous wind data (10 minute average)', 'spec': 'spectral wave summaries', 'data_spec': 'raw spectral wave data', 'swdir': 'spectral wave data (alpha1)', 'swdir2': 'spectral wave data (alpha2)', 'swr1': 'spectral wave data (r1)', 'swr2': 'spectral wave data (r2)', 'adcp': 'acoustic doppler current profiler', 'ocean': 'oceanographic data', 'tide': 'tide data', 'srad': 'solar radiation data', 'dart': 'water column height', 'supl': 'supplemental measurements data', 'rain': 'hourly rain data'} available_data = {} buoy_url = 'https://www.ndbc.noaa.gov/data/realtime2/' + buoy + '.' for key in file_types: if endpoint._check_if_url_valid(buoy_url + key): available_data[key] = file_types[key] return available_data
python
def buoy_data_types(cls, buoy): """Determine which types of data are available for a given buoy. Parameters ---------- buoy : str Buoy name Returns ------- dict of valid file extensions and their descriptions """ endpoint = cls() file_types = {'txt': 'standard meteorological data', 'drift': 'meteorological data from drifting buoys and limited moored ' 'buoy data mainly from international partners', 'cwind': 'continuous wind data (10 minute average)', 'spec': 'spectral wave summaries', 'data_spec': 'raw spectral wave data', 'swdir': 'spectral wave data (alpha1)', 'swdir2': 'spectral wave data (alpha2)', 'swr1': 'spectral wave data (r1)', 'swr2': 'spectral wave data (r2)', 'adcp': 'acoustic doppler current profiler', 'ocean': 'oceanographic data', 'tide': 'tide data', 'srad': 'solar radiation data', 'dart': 'water column height', 'supl': 'supplemental measurements data', 'rain': 'hourly rain data'} available_data = {} buoy_url = 'https://www.ndbc.noaa.gov/data/realtime2/' + buoy + '.' for key in file_types: if endpoint._check_if_url_valid(buoy_url + key): available_data[key] = file_types[key] return available_data
[ "def", "buoy_data_types", "(", "cls", ",", "buoy", ")", ":", "endpoint", "=", "cls", "(", ")", "file_types", "=", "{", "'txt'", ":", "'standard meteorological data'", ",", "'drift'", ":", "'meteorological data from drifting buoys and limited moored'", "'buoy data mainly from international partners'", ",", "'cwind'", ":", "'continuous wind data (10 minute average)'", ",", "'spec'", ":", "'spectral wave summaries'", ",", "'data_spec'", ":", "'raw spectral wave data'", ",", "'swdir'", ":", "'spectral wave data (alpha1)'", ",", "'swdir2'", ":", "'spectral wave data (alpha2)'", ",", "'swr1'", ":", "'spectral wave data (r1)'", ",", "'swr2'", ":", "'spectral wave data (r2)'", ",", "'adcp'", ":", "'acoustic doppler current profiler'", ",", "'ocean'", ":", "'oceanographic data'", ",", "'tide'", ":", "'tide data'", ",", "'srad'", ":", "'solar radiation data'", ",", "'dart'", ":", "'water column height'", ",", "'supl'", ":", "'supplemental measurements data'", ",", "'rain'", ":", "'hourly rain data'", "}", "available_data", "=", "{", "}", "buoy_url", "=", "'https://www.ndbc.noaa.gov/data/realtime2/'", "+", "buoy", "+", "'.'", "for", "key", "in", "file_types", ":", "if", "endpoint", ".", "_check_if_url_valid", "(", "buoy_url", "+", "key", ")", ":", "available_data", "[", "key", "]", "=", "file_types", "[", "key", "]", "return", "available_data" ]
Determine which types of data are available for a given buoy. Parameters ---------- buoy : str Buoy name Returns ------- dict of valid file extensions and their descriptions
[ "Determine", "which", "types", "of", "data", "are", "available", "for", "a", "given", "buoy", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/simplewebservice/ndbc.py#L482-L518
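Typical use, again assuming network access and an illustrative buoy id; one HEAD request is issued per candidate extension, so the call can take a few seconds:

from siphon.simplewebservice.ndbc import NDBC

available = NDBC.buoy_data_types('41002')
for ext, description in sorted(available.items()):
    print('{:10s} {}'.format(ext, description))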
Unidata/siphon
siphon/simplewebservice/ndbc.py
NDBC.raw_buoy_data
def raw_buoy_data(cls, buoy, data_type='txt'): """Retrieve the raw buoy data contents from NDBC. Parameters ---------- buoy : str Name of buoy data_type : str Type of data requested, must be one of 'txt' standard meteorological data 'drift' meteorological data from drifting buoys and limited moored buoy data mainly from international partners 'cwind' continuous winds data (10 minute average) 'spec' spectral wave summaries 'data_spec' raw spectral wave data 'swdir' spectral wave data (alpha1) 'swdir2' spectral wave data (alpha2) 'swr1' spectral wave data (r1) 'swr2' spectral wave data (r2) 'adcp' acoustic doppler current profiler 'ocean' oceanographic data 'tide' tide data 'srad' solar radiation data 'dart' water column height 'supl' supplemental measurements data 'rain' hourly rain data Returns ------- Raw data string """ endpoint = cls() resp = endpoint.get_path('data/realtime2/{}.{}'.format(buoy, data_type)) return resp.text
python
def raw_buoy_data(cls, buoy, data_type='txt'): """Retrieve the raw buoy data contents from NDBC. Parameters ---------- buoy : str Name of buoy data_type : str Type of data requested, must be one of 'txt' standard meteorological data 'drift' meteorological data from drifting buoys and limited moored buoy data mainly from international partners 'cwind' continuous winds data (10 minute average) 'spec' spectral wave summaries 'data_spec' raw spectral wave data 'swdir' spectral wave data (alpha1) 'swdir2' spectral wave data (alpha2) 'swr1' spectral wave data (r1) 'swr2' spectral wave data (r2) 'adcp' acoustic doppler current profiler 'ocean' oceanographic data 'tide' tide data 'srad' solar radiation data 'dart' water column height 'supl' supplemental measurements data 'rain' hourly rain data Returns ------- Raw data string """ endpoint = cls() resp = endpoint.get_path('data/realtime2/{}.{}'.format(buoy, data_type)) return resp.text
[ "def", "raw_buoy_data", "(", "cls", ",", "buoy", ",", "data_type", "=", "'txt'", ")", ":", "endpoint", "=", "cls", "(", ")", "resp", "=", "endpoint", ".", "get_path", "(", "'data/realtime2/{}.{}'", ".", "format", "(", "buoy", ",", "data_type", ")", ")", "return", "resp", ".", "text" ]
Retrieve the raw buoy data contents from NDBC. Parameters ---------- buoy : str Name of buoy data_type : str Type of data requested, must be one of 'txt' standard meteorological data 'drift' meteorological data from drifting buoys and limited moored buoy data mainly from international partners 'cwind' continuous winds data (10 minute average) 'spec' spectral wave summaries 'data_spec' raw spectral wave data 'swdir' spectral wave data (alpha1) 'swdir2' spectral wave data (alpha2) 'swr1' spectral wave data (r1) 'swr2' spectral wave data (r2) 'adcp' acoustic doppler current profiler 'ocean' oceanographic data 'tide' tide data 'srad' solar radiation data 'dart' water column height 'supl' supplemental measurements data 'rain' hourly rain data Returns ------- Raw data string
[ "Retrieve", "the", "raw", "buoy", "data", "contents", "from", "NDBC", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/simplewebservice/ndbc.py#L521-L556
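When none of the parsed helpers above covers a file type (for example 'swdir'), the raw text can be fetched and parsed by hand; a sketch assuming network access and that the illustrative buoy serves that file:

from siphon.simplewebservice.ndbc import NDBC

text = NDBC.raw_buoy_data('41002', data_type='swdir')
# NDBC realtime files lead with '#'-prefixed header lines.
header, *rows = text.splitlines()
print(header)
print(len(rows), 'data lines')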
Unidata/siphon
siphon/http_util.py
HTTPSessionManager.create_session
def create_session(self): """Create a new HTTP session with our user-agent set. Returns ------- session : requests.Session The created session See Also -------- urlopen, set_session_options """ ret = requests.Session() ret.headers['User-Agent'] = self.user_agent for k, v in self.options.items(): setattr(ret, k, v) return ret
python
def create_session(self): """Create a new HTTP session with our user-agent set. Returns ------- session : requests.Session The created session See Also -------- urlopen, set_session_options """ ret = requests.Session() ret.headers['User-Agent'] = self.user_agent for k, v in self.options.items(): setattr(ret, k, v) return ret
[ "def", "create_session", "(", "self", ")", ":", "ret", "=", "requests", ".", "Session", "(", ")", "ret", ".", "headers", "[", "'User-Agent'", "]", "=", "self", ".", "user_agent", "for", "k", ",", "v", "in", "self", ".", "options", ".", "items", "(", ")", ":", "setattr", "(", "ret", ",", "k", ",", "v", ")", "return", "ret" ]
Create a new HTTP session with our user-agent set. Returns ------- session : requests.Session The created session See Also -------- urlopen, set_session_options
[ "Create", "a", "new", "HTTP", "session", "with", "our", "user", "-", "agent", "set", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/http_util.py#L71-L88
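A sketch of how the manager hands out configured sessions, assuming the module-level `session_manager` instance that `siphon.http_util` exposes:

from siphon.http_util import session_manager

session = session_manager.create_session()
# Every session carries the library's User-Agent header.
print(session.headers['User-Agent'])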
Unidata/siphon
siphon/http_util.py
HTTPSessionManager.urlopen
def urlopen(self, url, **kwargs): """GET a file-like object for a URL using HTTP. This is a thin wrapper around :meth:`requests.Session.get` that returns a file-like object wrapped around the resulting content. Parameters ---------- url : str The URL to request kwargs : arbitrary keyword arguments Additional keyword arguments to pass to :meth:`requests.Session.get`. Returns ------- fobj : file-like object A file-like interface to the content in the response See Also -------- :meth:`requests.Session.get` """ return BytesIO(self.create_session().get(url, **kwargs).content)
python
def urlopen(self, url, **kwargs): """GET a file-like object for a URL using HTTP. This is a thin wrapper around :meth:`requests.Session.get` that returns a file-like object wrapped around the resulting content. Parameters ---------- url : str The URL to request kwargs : arbitrary keyword arguments Additional keyword arguments to pass to :meth:`requests.Session.get`. Returns ------- fobj : file-like object A file-like interface to the content in the response See Also -------- :meth:`requests.Session.get` """ return BytesIO(self.create_session().get(url, **kwargs).content)
[ "def", "urlopen", "(", "self", ",", "url", ",", "*", "*", "kwargs", ")", ":", "return", "BytesIO", "(", "self", ".", "create_session", "(", ")", ".", "get", "(", "url", ",", "*", "*", "kwargs", ")", ".", "content", ")" ]
GET a file-like object for a URL using HTTP. This is a thin wrapper around :meth:`requests.Session.get` that returns a file-like object wrapped around the resulting content. Parameters ---------- url : str The URL to request kwargs : arbitrary keyword arguments Additional keyword arguments to pass to :meth:`requests.Session.get`. Returns ------- fobj : file-like object A file-like interface to the content in the response See Also -------- :meth:`requests.Session.get`
[ "GET", "a", "file", "-", "like", "object", "for", "a", "URL", "using", "HTTP", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/http_util.py#L90-L114
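Because the bytes are wrapped in a `BytesIO`, the return value plugs into any API that expects a file-like object; a sketch using httpbin.org as a stand-in URL:

from siphon.http_util import session_manager

fobj = session_manager.urlopen('https://httpbin.org/bytes/16')
data = fobj.read()  # the whole response body, already in memory
print(len(data))    # 16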
Unidata/siphon
siphon/http_util.py
DataQuery.lonlat_box
def lonlat_box(self, west, east, south, north): """Add a latitude/longitude bounding box to the query. This adds a request for a spatial bounding box, bounded by ('north', 'south') for latitude and ('east', 'west') for the longitude. This modifies the query in-place, but returns `self` so that multiple queries can be chained together on one line. This replaces any existing spatial queries that have been set. Parameters ---------- west: float The bounding longitude to the west, in degrees east of the prime meridian east : float The bounding longitude to the east, in degrees east of the prime meridian south : float The bounding latitude to the south, in degrees north of the equator north : float The bounding latitude to the north, in degrees north of the equator Returns ------- self : DataQuery Returns self for chaining calls """ self._set_query(self.spatial_query, west=west, east=east, south=south, north=north) return self
python
def lonlat_box(self, west, east, south, north): """Add a latitude/longitude bounding box to the query. This adds a request for a spatial bounding box, bounded by ('north', 'south') for latitude and ('east', 'west') for the longitude. This modifies the query in-place, but returns `self` so that multiple queries can be chained together on one line. This replaces any existing spatial queries that have been set. Parameters ---------- west: float The bounding longitude to the west, in degrees east of the prime meridian east : float The bounding longitude to the east, in degrees east of the prime meridian south : float The bounding latitude to the south, in degrees north of the equator north : float The bounding latitude to the north, in degrees north of the equator Returns ------- self : DataQuery Returns self for chaining calls """ self._set_query(self.spatial_query, west=west, east=east, south=south, north=north) return self
[ "def", "lonlat_box", "(", "self", ",", "west", ",", "east", ",", "south", ",", "north", ")", ":", "self", ".", "_set_query", "(", "self", ".", "spatial_query", ",", "west", "=", "west", ",", "east", "=", "east", ",", "south", "=", "south", ",", "north", "=", "north", ")", "return", "self" ]
Add a latitude/longitude bounding box to the query. This adds a request for a spatial bounding box, bounded by ('north', 'south') for latitude and ('east', 'west') for the longitude. This modifies the query in-place, but returns `self` so that multiple queries can be chained together on one line. This replaces any existing spatial queries that have been set. Parameters ---------- west: float The bounding longitude to the west, in degrees east of the prime meridian east : float The bounding longitude to the east, in degrees east of the prime meridian south : float The bounding latitude to the south, in degrees north of the equator north : float The bounding latitude to the north, in degrees north of the equator Returns ------- self : DataQuery Returns self for chaining calls
[ "Add", "a", "latitude", "/", "longitude", "bounding", "box", "to", "the", "query", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/http_util.py#L197-L226
Unidata/siphon
siphon/http_util.py
DataQuery.lonlat_point
def lonlat_point(self, lon, lat): """Add a latitude/longitude point to the query. This adds a request for a (`lon`, `lat`) point. This modifies the query in-place, but returns `self` so that multiple queries can be chained together on one line. This replaces any existing spatial queries that have been set. Parameters ---------- lon: float The longitude to request lat : float The latitude to request Returns ------- self : DataQuery Returns self for chaining calls """ self._set_query(self.spatial_query, longitude=lon, latitude=lat) return self
python
def lonlat_point(self, lon, lat): """Add a latitude/longitude point to the query. This adds a request for a (`lon`, `lat`) point. This modifies the query in-place, but returns `self` so that multiple queries can be chained together on one line. This replaces any existing spatial queries that have been set. Parameters ---------- lon: float The longitude to request lat : float The latitude to request Returns ------- self : DataQuery Returns self for chaining calls """ self._set_query(self.spatial_query, longitude=lon, latitude=lat) return self
[ "def", "lonlat_point", "(", "self", ",", "lon", ",", "lat", ")", ":", "self", ".", "_set_query", "(", "self", ".", "spatial_query", ",", "longitude", "=", "lon", ",", "latitude", "=", "lat", ")", "return", "self" ]
Add a latitude/longitude point to the query. This adds a request for a (`lon`, `lat`) point. This modifies the query in-place, but returns `self` so that multiple queries can be chained together on one line. This replaces any existing spatial queries that have been set. Parameters ---------- lon: float The longitude to request lat : float The latitude to request Returns ------- self : DataQuery Returns self for chaining calls
[ "Add", "a", "latitude", "/", "longitude", "point", "to", "the", "query", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/http_util.py#L228-L251
Unidata/siphon
siphon/http_util.py
DataQuery.time
def time(self, time): """Add a request for a specific time to the query. This modifies the query in-place, but returns `self` so that multiple queries can be chained together on one line. This replaces any existing temporal queries that have been set. Parameters ---------- time : datetime.datetime The time to request Returns ------- self : DataQuery Returns self for chaining calls """ self._set_query(self.time_query, time=self._format_time(time)) return self
python
def time(self, time): """Add a request for a specific time to the query. This modifies the query in-place, but returns `self` so that multiple queries can be chained together on one line. This replaces any existing temporal queries that have been set. Parameters ---------- time : datetime.datetime The time to request Returns ------- self : DataQuery Returns self for chaining calls """ self._set_query(self.time_query, time=self._format_time(time)) return self
[ "def", "time", "(", "self", ",", "time", ")", ":", "self", ".", "_set_query", "(", "self", ".", "time_query", ",", "time", "=", "self", ".", "_format_time", "(", "time", ")", ")", "return", "self" ]
Add a request for a specific time to the query. This modifies the query in-place, but returns `self` so that multiple queries can be chained together on one line. This replaces any existing temporal queries that have been set. Parameters ---------- time : datetime.datetime The time to request Returns ------- self : DataQuery Returns self for chaining calls
[ "Add", "a", "request", "for", "a", "specific", "time", "to", "the", "query", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/http_util.py#L277-L297
Unidata/siphon
siphon/http_util.py
DataQuery.time_range
def time_range(self, start, end): """Add a request for a time range to the query. This modifies the query in-place, but returns `self` so that multiple queries can be chained together on one line. This replaces any existing temporal queries that have been set. Parameters ---------- start : datetime.datetime The start of the requested time range end : datetime.datetime The end of the requested time range Returns ------- self : DataQuery Returns self for chaining calls """ self._set_query(self.time_query, time_start=self._format_time(start), time_end=self._format_time(end)) return self
python
def time_range(self, start, end): """Add a request for a time range to the query. This modifies the query in-place, but returns `self` so that multiple queries can be chained together on one line. This replaces any existing temporal queries that have been set. Parameters ---------- start : datetime.datetime The start of the requested time range end : datetime.datetime The end of the requested time range Returns ------- self : DataQuery Returns self for chaining calls """ self._set_query(self.time_query, time_start=self._format_time(start), time_end=self._format_time(end)) return self
[ "def", "time_range", "(", "self", ",", "start", ",", "end", ")", ":", "self", ".", "_set_query", "(", "self", ".", "time_query", ",", "time_start", "=", "self", ".", "_format_time", "(", "start", ")", ",", "time_end", "=", "self", ".", "_format_time", "(", "end", ")", ")", "return", "self" ]
Add a request for a time range to the query. This modifies the query in-place, but returns `self` so that multiple queries can be chained together on one line. This replaces any existing temporal queries that have been set. Parameters ---------- start : datetime.datetime The start of the requested time range end : datetime.datetime The end of the requested time range Returns ------- self : DataQuery Returns self for chaining calls
[ "Add", "a", "request", "for", "a", "time", "range", "to", "the", "query", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/http_util.py#L299-L322
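Because each setter returns `self`, spatial and temporal constraints chain naturally; a sketch assuming `DataQuery` is imported from `siphon.http_util` (coordinates and times are illustrative):

from datetime import datetime
from siphon.http_util import DataQuery

query = (DataQuery()
         .lonlat_box(west=-105.5, east=-104.5, south=39.5, north=40.5)
         .time_range(datetime(2019, 1, 1, 0), datetime(2019, 1, 1, 12)))
print(query.spatial_query)  # {'west': -105.5, 'east': -104.5, ...}
print(query.time_query)     # {'time_start': ..., 'time_end': ...}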
Unidata/siphon
siphon/http_util.py
HTTPEndPoint.get_query
def get_query(self, query): """Make a GET request, including a query, to the endpoint. The path of the request is to the base URL assigned to the endpoint. Parameters ---------- query : DataQuery The query to pass when making the request Returns ------- resp : requests.Response The server's response to the request See Also -------- get_path, get """ url = self._base[:-1] if self._base[-1] == '/' else self._base return self.get(url, query)
python
def get_query(self, query): """Make a GET request, including a query, to the endpoint. The path of the request is to the base URL assigned to the endpoint. Parameters ---------- query : DataQuery The query to pass when making the request Returns ------- resp : requests.Response The server's response to the request See Also -------- get_path, get """ url = self._base[:-1] if self._base[-1] == '/' else self._base return self.get(url, query)
[ "def", "get_query", "(", "self", ",", "query", ")", ":", "url", "=", "self", ".", "_base", "[", ":", "-", "1", "]", "if", "self", ".", "_base", "[", "-", "1", "]", "==", "'/'", "else", "self", ".", "_base", "return", "self", ".", "get", "(", "url", ",", "query", ")" ]
Make a GET request, including a query, to the endpoint. The path of the request is to the base URL assigned to the endpoint. Parameters ---------- query : DataQuery The query to pass when making the request Returns ------- resp : requests.Response The server's response to the request See Also -------- get_path, get
[ "Make", "a", "GET", "request", "including", "a", "query", "to", "the", "endpoint", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/http_util.py#L381-L402
Unidata/siphon
siphon/http_util.py
HTTPEndPoint.get_path
def get_path(self, path, query=None): """Make a GET request, optionally including a query, to a relative path. The path of the request includes a path on top of the base URL assigned to the endpoint. Parameters ---------- path : str The path to request, relative to the endpoint query : DataQuery, optional The query to pass when making the request Returns ------- resp : requests.Response The server's response to the request See Also -------- get_query, get, url_path """ return self.get(self.url_path(path), query)
python
def get_path(self, path, query=None): """Make a GET request, optionally including a query, to a relative path. The path of the request includes a path on top of the base URL assigned to the endpoint. Parameters ---------- path : str The path to request, relative to the endpoint query : DataQuery, optional The query to pass when making the request Returns ------- resp : requests.Response The server's response to the request See Also -------- get_query, get, url_path """ return self.get(self.url_path(path), query)
[ "def", "get_path", "(", "self", ",", "path", ",", "query", "=", "None", ")", ":", "return", "self", ".", "get", "(", "self", ".", "url_path", "(", "path", ")", ",", "query", ")" ]
Make a GET request, optionally including a query, to a relative path. The path of the request includes a path on top of the base URL assigned to the endpoint. Parameters ---------- path : str The path to request, relative to the endpoint query : DataQuery, optional The query to pass when making the request Returns ------- resp : requests.Response The server's response to the request See Also -------- get_query, get, url_path
[ "Make", "a", "GET", "request", "optionally", "including", "a", "query", "to", "a", "relative", "path", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/http_util.py#L426-L449
Unidata/siphon
siphon/http_util.py
HTTPEndPoint.get
def get(self, path, params=None): """Make a GET request, optionally including parameters, to a path. The path of the request is the full URL. Parameters ---------- path : str The URL to request params : DataQuery, optional The query to pass when making the request Returns ------- resp : requests.Response The server's response to the request Raises ------ HTTPError If the server returns anything other than a 200 (OK) code See Also -------- get_query, get_path """ resp = self._session.get(path, params=params) if resp.status_code != 200: if resp.headers.get('Content-Type', '').startswith('text/html'): text = resp.reason else: text = resp.text raise requests.HTTPError('Error accessing {0}\n' 'Server Error ({1:d}: {2})'.format(resp.request.url, resp.status_code, text)) return resp
python
def get(self, path, params=None): """Make a GET request, optionally including parameters, to a path. The path of the request is the full URL. Parameters ---------- path : str The URL to request params : DataQuery, optional The query to pass when making the request Returns ------- resp : requests.Response The server's response to the request Raises ------ HTTPError If the server returns anything other than a 200 (OK) code See Also -------- get_query, get_path """ resp = self._session.get(path, params=params) if resp.status_code != 200: if resp.headers.get('Content-Type', '').startswith('text/html'): text = resp.reason else: text = resp.text raise requests.HTTPError('Error accessing {0}\n' 'Server Error ({1:d}: {2})'.format(resp.request.url, resp.status_code, text)) return resp
[ "def", "get", "(", "self", ",", "path", ",", "params", "=", "None", ")", ":", "resp", "=", "self", ".", "_session", ".", "get", "(", "path", ",", "params", "=", "params", ")", "if", "resp", ".", "status_code", "!=", "200", ":", "if", "resp", ".", "headers", ".", "get", "(", "'Content-Type'", ",", "''", ")", ".", "startswith", "(", "'text/html'", ")", ":", "text", "=", "resp", ".", "reason", "else", ":", "text", "=", "resp", ".", "text", "raise", "requests", ".", "HTTPError", "(", "'Error accessing {0}\\n'", "'Server Error ({1:d}: {2})'", ".", "format", "(", "resp", ".", "request", ".", "url", ",", "resp", ".", "status_code", ",", "text", ")", ")", "return", "resp" ]
Make a GET request, optionally including parameters, to a path. The path of the request is the full URL. Parameters ---------- path : str The URL to request params : DataQuery, optional The query to pass when making the request Returns ------- resp : requests.Response The server's response to the request Raises ------ HTTPError If the server returns anything other than a 200 (OK) code See Also -------- get_query, get_path
[ "Make", "a", "GET", "request", "optionally", "including", "a", "parameters", "to", "a", "path", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/http_util.py#L451-L488
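The failure branch above can be seen against any endpoint that returns a non-200 status; a sketch assuming `HTTPEndPoint` is constructed with a base URL (httpbin.org is only a stand-in):

import requests
from siphon.http_util import HTTPEndPoint

endpoint = HTTPEndPoint('https://httpbin.org')
try:
    endpoint.get_path('status/404')
except requests.HTTPError as err:
    # The raised error embeds the request URL and the status code.
    print(err)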
Unidata/siphon
siphon/cdmr/dataset.py
Group.path
def path(self): """Return the full path to the Group, including any parent Groups.""" # If root, return the empty string so child paths begin with '/' if self.dataset is self: return '' else: # Otherwise recurse return self.dataset.path + '/' + self.name
python
def path(self): """Return the full path to the Group, including any parent Groups.""" # If root, return the empty string so child paths begin with '/' if self.dataset is self: return '' else: # Otherwise recurse return self.dataset.path + '/' + self.name
[ "def", "path", "(", "self", ")", ":", "# If root, return '/'", "if", "self", ".", "dataset", "is", "self", ":", "return", "''", "else", ":", "# Otherwise recurse", "return", "self", ".", "dataset", ".", "path", "+", "'/'", "+", "self", ".", "name" ]
Return the full path to the Group, including any parent Groups.
[ "Return", "the", "full", "path", "to", "the", "Group", "including", "any", "parent", "Groups", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/dataset.py#L53-L59
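The recursion bottoms out at the root, whose `dataset` attribute points back at itself; this standalone mimic of the logic (not the real `Group` class) shows how nested names accumulate:

class Node:
    def __init__(self, parent, name=''):
        # The root points at itself, mirroring Group.dataset semantics.
        self.dataset = parent if parent is not None else self
        self.name = name

    @property
    def path(self):
        # Root contributes '', so every child path begins with '/'.
        if self.dataset is self:
            return ''
        return self.dataset.path + '/' + self.name

root = Node(None)
child = Node(root, 'model')
print(Node(child, 'isobaric').path)  # '/model/isobaric'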
Unidata/siphon
siphon/cdmr/dataset.py
Group.load_from_stream
def load_from_stream(self, group): """Load a Group from an NCStream object.""" self._unpack_attrs(group.atts) self.name = group.name for dim in group.dims: new_dim = Dimension(self, dim.name) self.dimensions[dim.name] = new_dim new_dim.load_from_stream(dim) for var in group.vars: new_var = Variable(self, var.name) self.variables[var.name] = new_var new_var.load_from_stream(var) for grp in group.groups: new_group = Group(self) self.groups[grp.name] = new_group new_group.load_from_stream(grp) for struct in group.structs: new_var = Variable(self, struct.name) self.variables[struct.name] = new_var new_var.load_from_stream(struct) if group.enumTypes: for en in group.enumTypes: self.types[en.name] = enum.Enum(en.name, [(typ.value, typ.code) for typ in en.map])
python
def load_from_stream(self, group): """Load a Group from an NCStream object.""" self._unpack_attrs(group.atts) self.name = group.name for dim in group.dims: new_dim = Dimension(self, dim.name) self.dimensions[dim.name] = new_dim new_dim.load_from_stream(dim) for var in group.vars: new_var = Variable(self, var.name) self.variables[var.name] = new_var new_var.load_from_stream(var) for grp in group.groups: new_group = Group(self) self.groups[grp.name] = new_group new_group.load_from_stream(grp) for struct in group.structs: new_var = Variable(self, struct.name) self.variables[struct.name] = new_var new_var.load_from_stream(struct) if group.enumTypes: for en in group.enumTypes: self.types[en.name] = enum.Enum(en.name, [(typ.value, typ.code) for typ in en.map])
[ "def", "load_from_stream", "(", "self", ",", "group", ")", ":", "self", ".", "_unpack_attrs", "(", "group", ".", "atts", ")", "self", ".", "name", "=", "group", ".", "name", "for", "dim", "in", "group", ".", "dims", ":", "new_dim", "=", "Dimension", "(", "self", ",", "dim", ".", "name", ")", "self", ".", "dimensions", "[", "dim", ".", "name", "]", "=", "new_dim", "new_dim", ".", "load_from_stream", "(", "dim", ")", "for", "var", "in", "group", ".", "vars", ":", "new_var", "=", "Variable", "(", "self", ",", "var", ".", "name", ")", "self", ".", "variables", "[", "var", ".", "name", "]", "=", "new_var", "new_var", ".", "load_from_stream", "(", "var", ")", "for", "grp", "in", "group", ".", "groups", ":", "new_group", "=", "Group", "(", "self", ")", "self", ".", "groups", "[", "grp", ".", "name", "]", "=", "new_group", "new_group", ".", "load_from_stream", "(", "grp", ")", "for", "struct", "in", "group", ".", "structs", ":", "new_var", "=", "Variable", "(", "self", ",", "struct", ".", "name", ")", "self", ".", "variables", "[", "struct", ".", "name", "]", "=", "new_var", "new_var", ".", "load_from_stream", "(", "struct", ")", "if", "group", ".", "enumTypes", ":", "for", "en", "in", "group", ".", "enumTypes", ":", "self", ".", "types", "[", "en", ".", "name", "]", "=", "enum", ".", "Enum", "(", "en", ".", "name", ",", "[", "(", "typ", ".", "value", ",", "typ", ".", "code", ")", "for", "typ", "in", "en", ".", "map", "]", ")" ]
Load a Group from an NCStream object.
[ "Load", "a", "Group", "from", "an", "NCStream", "object", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/dataset.py#L61-L89
Unidata/siphon
siphon/cdmr/dataset.py
Variable.load_from_stream
def load_from_stream(self, var): """Populate the Variable from an NCStream object.""" dims = [] for d in var.shape: dim = Dimension(None, d.name) dim.load_from_stream(d) dims.append(dim) self.dimensions = tuple(dim.name for dim in dims) self.shape = tuple(dim.size for dim in dims) self.ndim = len(var.shape) self._unpack_attrs(var.atts) data, dt, type_name = unpack_variable(var) if data is not None: data = data.reshape(self.shape) self._data = data self.dtype = dt self.datatype = type_name if hasattr(var, 'enumType') and var.enumType: self.datatype = var.enumType self._enum = True
python
def load_from_stream(self, var): """Populate the Variable from an NCStream object.""" dims = [] for d in var.shape: dim = Dimension(None, d.name) dim.load_from_stream(d) dims.append(dim) self.dimensions = tuple(dim.name for dim in dims) self.shape = tuple(dim.size for dim in dims) self.ndim = len(var.shape) self._unpack_attrs(var.atts) data, dt, type_name = unpack_variable(var) if data is not None: data = data.reshape(self.shape) self._data = data self.dtype = dt self.datatype = type_name if hasattr(var, 'enumType') and var.enumType: self.datatype = var.enumType self._enum = True
[ "def", "load_from_stream", "(", "self", ",", "var", ")", ":", "dims", "=", "[", "]", "for", "d", "in", "var", ".", "shape", ":", "dim", "=", "Dimension", "(", "None", ",", "d", ".", "name", ")", "dim", ".", "load_from_stream", "(", "d", ")", "dims", ".", "append", "(", "dim", ")", "self", ".", "dimensions", "=", "tuple", "(", "dim", ".", "name", "for", "dim", "in", "dims", ")", "self", ".", "shape", "=", "tuple", "(", "dim", ".", "size", "for", "dim", "in", "dims", ")", "self", ".", "ndim", "=", "len", "(", "var", ".", "shape", ")", "self", ".", "_unpack_attrs", "(", "var", ".", "atts", ")", "data", ",", "dt", ",", "type_name", "=", "unpack_variable", "(", "var", ")", "if", "data", "is", "not", "None", ":", "data", "=", "data", ".", "reshape", "(", "self", ".", "shape", ")", "self", ".", "_data", "=", "data", "self", ".", "dtype", "=", "dt", "self", ".", "datatype", "=", "type_name", "if", "hasattr", "(", "var", ",", "'enumType'", ")", "and", "var", ".", "enumType", ":", "self", ".", "datatype", "=", "var", ".", "enumType", "self", ".", "_enum", "=", "True" ]
Populate the Variable from an NCStream object.
[ "Populate", "the", "Variable", "from", "an", "NCStream", "object", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/dataset.py#L288-L310
Unidata/siphon
siphon/cdmr/dataset.py
Dimension.load_from_stream
def load_from_stream(self, dim): """Load from an NCStream object.""" self.unlimited = dim.isUnlimited self.private = dim.isPrivate self.vlen = dim.isVlen if not self.vlen: self.size = dim.length
python
def load_from_stream(self, dim): """Load from an NCStream object.""" self.unlimited = dim.isUnlimited self.private = dim.isPrivate self.vlen = dim.isVlen if not self.vlen: self.size = dim.length
[ "def", "load_from_stream", "(", "self", ",", "dim", ")", ":", "self", ".", "unlimited", "=", "dim", ".", "isUnlimited", "self", ".", "private", "=", "dim", ".", "isPrivate", "self", ".", "vlen", "=", "dim", ".", "isVlen", "if", "not", "self", ".", "vlen", ":", "self", ".", "size", "=", "dim", ".", "length" ]
Load from an NCStream object.
[ "Load", "from", "an", "NCStream", "object", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/dataset.py#L349-L355
Unidata/siphon
siphon/cdmr/coveragedataset.py
CoverageDataset._read_header
def _read_header(self): """Get the needed header information to initialize dataset.""" self._header = self.cdmrf.fetch_header() self.load_from_stream(self._header)
python
def _read_header(self): """Get the needed header information to initialize dataset.""" self._header = self.cdmrf.fetch_header() self.load_from_stream(self._header)
[ "def", "_read_header", "(", "self", ")", ":", "self", ".", "_header", "=", "self", ".", "cdmrf", ".", "fetch_header", "(", ")", "self", ".", "load_from_stream", "(", "self", ".", "_header", ")" ]
Get the needed header information to initialize dataset.
[ "Get", "the", "needed", "header", "information", "to", "initialize", "dataset", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/coveragedataset.py#L45-L48
Unidata/siphon
siphon/cdmr/coveragedataset.py
CoverageDataset.load_from_stream
def load_from_stream(self, header): """Populate the CoverageDataset from the protobuf information.""" self._unpack_attrs(header.atts) self.name = header.name self.lon_lat_domain = header.latlonRect self.proj_domain = header.projRect self.date_range = header.dateRange self.type = header.coverageType for sys in header.coordSys: self.coord_systems[sys.name] = sys for trans in header.coordTransforms: self.transforms[trans.name] = trans for ax in header.coordAxes: self.axes[ax.name] = ax for cov in header.grids: self.grids[cov.name] = cov
python
def load_from_stream(self, header): """Populate the CoverageDataset from the protobuf information.""" self._unpack_attrs(header.atts) self.name = header.name self.lon_lat_domain = header.latlonRect self.proj_domain = header.projRect self.date_range = header.dateRange self.type = header.coverageType for sys in header.coordSys: self.coord_systems[sys.name] = sys for trans in header.coordTransforms: self.transforms[trans.name] = trans for ax in header.coordAxes: self.axes[ax.name] = ax for cov in header.grids: self.grids[cov.name] = cov
[ "def", "load_from_stream", "(", "self", ",", "header", ")", ":", "self", ".", "_unpack_attrs", "(", "header", ".", "atts", ")", "self", ".", "name", "=", "header", ".", "name", "self", ".", "lon_lat_domain", "=", "header", ".", "latlonRect", "self", ".", "proj_domain", "=", "header", ".", "projRect", "self", ".", "date_range", "=", "header", ".", "dateRange", "self", ".", "type", "=", "header", ".", "coverageType", "for", "sys", "in", "header", ".", "coordSys", ":", "self", ".", "coord_systems", "[", "sys", ".", "name", "]", "=", "sys", "for", "trans", "in", "header", ".", "coordTransforms", ":", "self", ".", "transforms", "[", "trans", ".", "name", "]", "=", "trans", "for", "ax", "in", "header", ".", "coordAxes", ":", "self", ".", "axes", "[", "ax", ".", "name", "]", "=", "ax", "for", "cov", "in", "header", ".", "grids", ":", "self", ".", "grids", "[", "cov", ".", "name", "]", "=", "cov" ]
Populate the CoverageDataset from the protobuf information.
[ "Populate", "the", "CoverageDataset", "from", "the", "protobuf", "information", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/coveragedataset.py#L50-L69
Unidata/siphon
siphon/simplewebservice/iastate.py
IAStateUpperAir.request_data
def request_data(cls, time, site_id, **kwargs): """Retrieve upper air observations from Iowa State's archive for a single station. Parameters ---------- time : datetime The date and time of the desired observation. site_id : str The three letter ICAO identifier of the station for which data should be downloaded. kwargs Arbitrary keyword arguments to use to initialize source Returns ------- :class:`pandas.DataFrame` containing the data """ endpoint = cls() df = endpoint._get_data(time, site_id, None, **kwargs) return df
python
def request_data(cls, time, site_id, **kwargs): """Retrieve upper air observations from Iowa State's archive for a single station. Parameters ---------- time : datetime The date and time of the desired observation. site_id : str The three letter ICAO identifier of the station for which data should be downloaded. kwargs Arbitrary keyword arguments to use to initialize source Returns ------- :class:`pandas.DataFrame` containing the data """ endpoint = cls() df = endpoint._get_data(time, site_id, None, **kwargs) return df
[ "def", "request_data", "(", "cls", ",", "time", ",", "site_id", ",", "*", "*", "kwargs", ")", ":", "endpoint", "=", "cls", "(", ")", "df", "=", "endpoint", ".", "_get_data", "(", "time", ",", "site_id", ",", "None", ",", "*", "*", "kwargs", ")", "return", "df" ]
Retrieve upper air observations from Iowa State's archive for a single station. Parameters ---------- time : datetime The date and time of the desired observation. site_id : str The three letter ICAO identifier of the station for which data should be downloaded. kwargs Arbitrary keyword arguments to use to initialize source Returns ------- :class:`pandas.DataFrame` containing the data
[ "Retrieve", "upper", "air", "observations", "from", "Iowa", "State", "s", "archive", "for", "a", "single", "station", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/simplewebservice/iastate.py#L28-L50
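Example use of the single-station request, assuming network access; the station id and time are illustrative:

from datetime import datetime
from siphon.simplewebservice.iastate import IAStateUpperAir

sounding = IAStateUpperAir.request_data(datetime(2019, 1, 1, 0), 'DNR')
print(sounding[['pressure', 'height', 'temperature', 'dewpoint']].head())
print(sounding.units['speed'])  # 'knot'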
Unidata/siphon
siphon/simplewebservice/iastate.py
IAStateUpperAir.request_all_data
def request_all_data(cls, time, pressure=None, **kwargs): """Retrieve upper air observations from Iowa State's archive for all stations. Parameters ---------- time : datetime The date and time of the desired observation. pressure : float, optional The mandatory pressure level at which to request data (in hPa). If none is given, all the available data in the profiles is returned. kwargs Arbitrary keyword arguments to use to initialize source Returns ------- :class:`pandas.DataFrame` containing the data """ endpoint = cls() df = endpoint._get_data(time, None, pressure, **kwargs) return df
python
def request_all_data(cls, time, pressure=None, **kwargs): """Retrieve upper air observations from Iowa State's archive for all stations. Parameters ---------- time : datetime The date and time of the desired observation. pressure : float, optional The mandatory pressure level at which to request data (in hPa). If none is given, all the available data in the profiles is returned. kwargs Arbitrary keyword arguments to use to initialize source Returns ------- :class:`pandas.DataFrame` containing the data """ endpoint = cls() df = endpoint._get_data(time, None, pressure, **kwargs) return df
[ "def", "request_all_data", "(", "cls", ",", "time", ",", "pressure", "=", "None", ",", "*", "*", "kwargs", ")", ":", "endpoint", "=", "cls", "(", ")", "df", "=", "endpoint", ".", "_get_data", "(", "time", ",", "None", ",", "pressure", ",", "*", "*", "kwargs", ")", "return", "df" ]
Retrieve upper air observations from Iowa State's archive for all stations. Parameters ---------- time : datetime The date and time of the desired observation. pressure : float, optional The mandatory pressure level at which to request data (in hPa). If none is given, all the available data in the profiles is returned. kwargs Arbitrary keyword arguments to use to initialize source Returns ------- :class:`pandas.DataFrame` containing the data
[ "Retrieve", "upper", "air", "observations", "from", "Iowa", "State", "s", "archive", "for", "all", "stations", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/simplewebservice/iastate.py#L53-L75
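And the all-stations variant restricted to one mandatory level, again with illustrative arguments:

from datetime import datetime
from siphon.simplewebservice.iastate import IAStateUpperAir

level = IAStateUpperAir.request_all_data(datetime(2019, 1, 1, 0), pressure=500)
print(level['station'].nunique(), 'stations at 500 hPa')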
Unidata/siphon
siphon/simplewebservice/iastate.py
IAStateUpperAir._get_data
def _get_data(self, time, site_id, pressure=None): """Download data from Iowa State's upper air archive. Parameters ---------- time : datetime Date and time for which data should be downloaded site_id : str Site id for which data should be downloaded pressure : float, optional Mandatory pressure level at which to request data (in hPa). Returns ------- :class:`pandas.DataFrame` containing the data """ json_data = self._get_data_raw(time, site_id, pressure) data = {} for profile in json_data['profiles']: for pt in profile['profile']: for field in ('drct', 'dwpc', 'hght', 'pres', 'sknt', 'tmpc'): data.setdefault(field, []).append(np.nan if pt[field] is None else pt[field]) for field in ('station', 'valid'): data.setdefault(field, []).append(np.nan if profile[field] is None else profile[field]) # Make sure that the first entry has a valid temperature and dewpoint idx = np.argmax(~(np.isnan(data['tmpc']) | np.isnan(data['dwpc']))) # Stuff data into a pandas dataframe df = pd.DataFrame() df['pressure'] = ma.masked_invalid(data['pres'][idx:]) df['height'] = ma.masked_invalid(data['hght'][idx:]) df['temperature'] = ma.masked_invalid(data['tmpc'][idx:]) df['dewpoint'] = ma.masked_invalid(data['dwpc'][idx:]) df['direction'] = ma.masked_invalid(data['drct'][idx:]) df['speed'] = ma.masked_invalid(data['sknt'][idx:]) df['station'] = data['station'][idx:] df['time'] = [datetime.strptime(valid, '%Y-%m-%dT%H:%M:%SZ') for valid in data['valid'][idx:]] # Calculate the u and v winds df['u_wind'], df['v_wind'] = get_wind_components(df['speed'], np.deg2rad(df['direction'])) # Drop any rows with all NaN values for T, Td, winds df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed', 'u_wind', 'v_wind'), how='all').reset_index(drop=True) # Add unit dictionary df.units = {'pressure': 'hPa', 'height': 'meter', 'temperature': 'degC', 'dewpoint': 'degC', 'direction': 'degrees', 'speed': 'knot', 'u_wind': 'knot', 'v_wind': 'knot', 'station': None, 'time': None} return df
python
def _get_data(self, time, site_id, pressure=None): """Download data from Iowa State's upper air archive. Parameters ---------- time : datetime Date and time for which data should be downloaded site_id : str Site id for which data should be downloaded pressure : float, optional Mandatory pressure level at which to request data (in hPa). Returns ------- :class:`pandas.DataFrame` containing the data """ json_data = self._get_data_raw(time, site_id, pressure) data = {} for profile in json_data['profiles']: for pt in profile['profile']: for field in ('drct', 'dwpc', 'hght', 'pres', 'sknt', 'tmpc'): data.setdefault(field, []).append(np.nan if pt[field] is None else pt[field]) for field in ('station', 'valid'): data.setdefault(field, []).append(np.nan if profile[field] is None else profile[field]) # Make sure that the first entry has a valid temperature and dewpoint idx = np.argmax(~(np.isnan(data['tmpc']) | np.isnan(data['dwpc']))) # Stuff data into a pandas dataframe df = pd.DataFrame() df['pressure'] = ma.masked_invalid(data['pres'][idx:]) df['height'] = ma.masked_invalid(data['hght'][idx:]) df['temperature'] = ma.masked_invalid(data['tmpc'][idx:]) df['dewpoint'] = ma.masked_invalid(data['dwpc'][idx:]) df['direction'] = ma.masked_invalid(data['drct'][idx:]) df['speed'] = ma.masked_invalid(data['sknt'][idx:]) df['station'] = data['station'][idx:] df['time'] = [datetime.strptime(valid, '%Y-%m-%dT%H:%M:%SZ') for valid in data['valid'][idx:]] # Calculate the u and v winds df['u_wind'], df['v_wind'] = get_wind_components(df['speed'], np.deg2rad(df['direction'])) # Drop any rows with all NaN values for T, Td, winds df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed', 'u_wind', 'v_wind'), how='all').reset_index(drop=True) # Add unit dictionary df.units = {'pressure': 'hPa', 'height': 'meter', 'temperature': 'degC', 'dewpoint': 'degC', 'direction': 'degrees', 'speed': 'knot', 'u_wind': 'knot', 'v_wind': 'knot', 'station': None, 'time': None} return df
[ "def", "_get_data", "(", "self", ",", "time", ",", "site_id", ",", "pressure", "=", "None", ")", ":", "json_data", "=", "self", ".", "_get_data_raw", "(", "time", ",", "site_id", ",", "pressure", ")", "data", "=", "{", "}", "for", "profile", "in", "json_data", "[", "'profiles'", "]", ":", "for", "pt", "in", "profile", "[", "'profile'", "]", ":", "for", "field", "in", "(", "'drct'", ",", "'dwpc'", ",", "'hght'", ",", "'pres'", ",", "'sknt'", ",", "'tmpc'", ")", ":", "data", ".", "setdefault", "(", "field", ",", "[", "]", ")", ".", "append", "(", "np", ".", "nan", "if", "pt", "[", "field", "]", "is", "None", "else", "pt", "[", "field", "]", ")", "for", "field", "in", "(", "'station'", ",", "'valid'", ")", ":", "data", ".", "setdefault", "(", "field", ",", "[", "]", ")", ".", "append", "(", "np", ".", "nan", "if", "profile", "[", "field", "]", "is", "None", "else", "profile", "[", "field", "]", ")", "# Make sure that the first entry has a valid temperature and dewpoint", "idx", "=", "np", ".", "argmax", "(", "~", "(", "np", ".", "isnan", "(", "data", "[", "'tmpc'", "]", ")", "|", "np", ".", "isnan", "(", "data", "[", "'dwpc'", "]", ")", ")", ")", "# Stuff data into a pandas dataframe", "df", "=", "pd", ".", "DataFrame", "(", ")", "df", "[", "'pressure'", "]", "=", "ma", ".", "masked_invalid", "(", "data", "[", "'pres'", "]", "[", "idx", ":", "]", ")", "df", "[", "'height'", "]", "=", "ma", ".", "masked_invalid", "(", "data", "[", "'hght'", "]", "[", "idx", ":", "]", ")", "df", "[", "'temperature'", "]", "=", "ma", ".", "masked_invalid", "(", "data", "[", "'tmpc'", "]", "[", "idx", ":", "]", ")", "df", "[", "'dewpoint'", "]", "=", "ma", ".", "masked_invalid", "(", "data", "[", "'dwpc'", "]", "[", "idx", ":", "]", ")", "df", "[", "'direction'", "]", "=", "ma", ".", "masked_invalid", "(", "data", "[", "'drct'", "]", "[", "idx", ":", "]", ")", "df", "[", "'speed'", "]", "=", "ma", ".", "masked_invalid", "(", "data", "[", "'sknt'", "]", "[", "idx", ":", "]", ")", "df", "[", "'station'", "]", "=", "data", "[", "'station'", "]", "[", "idx", ":", "]", "df", "[", "'time'", "]", "=", "[", "datetime", ".", "strptime", "(", "valid", ",", "'%Y-%m-%dT%H:%M:%SZ'", ")", "for", "valid", "in", "data", "[", "'valid'", "]", "[", "idx", ":", "]", "]", "# Calculate the u and v winds", "df", "[", "'u_wind'", "]", ",", "df", "[", "'v_wind'", "]", "=", "get_wind_components", "(", "df", "[", "'speed'", "]", ",", "np", ".", "deg2rad", "(", "df", "[", "'direction'", "]", ")", ")", "# Drop any rows with all NaN values for T, Td, winds", "df", "=", "df", ".", "dropna", "(", "subset", "=", "(", "'temperature'", ",", "'dewpoint'", ",", "'direction'", ",", "'speed'", ",", "'u_wind'", ",", "'v_wind'", ")", ",", "how", "=", "'all'", ")", ".", "reset_index", "(", "drop", "=", "True", ")", "# Add unit dictionary", "df", ".", "units", "=", "{", "'pressure'", ":", "'hPa'", ",", "'height'", ":", "'meter'", ",", "'temperature'", ":", "'degC'", ",", "'dewpoint'", ":", "'degC'", ",", "'direction'", ":", "'degrees'", ",", "'speed'", ":", "'knot'", ",", "'u_wind'", ":", "'knot'", ",", "'v_wind'", ":", "'knot'", ",", "'station'", ":", "None", ",", "'time'", ":", "None", "}", "return", "df" ]
Download data from Iowa State's upper air archive.

        Parameters
        ----------
        time : datetime
            Date and time for which data should be downloaded
        site_id : str
            Site id for which data should be downloaded
        pressure : float, optional
            Mandatory pressure level at which to request data (in hPa).

        Returns
        -------
        :class:`pandas.DataFrame` containing the data
[ "Download", "data", "from", "Iowa", "State", "s", "upper", "air", "archive", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/simplewebservice/iastate.py#L77-L139
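A minimal usage sketch for the record above. `_get_data` is an internal method; the call below assumes siphon's public `request_data` classmethod wraps it, and it needs network access to Iowa State's archive. The date and station id are illustrative.

from datetime import datetime

from siphon.simplewebservice.iastate import IAStateUpperAir

# Fetch the 00Z Omaha (OAX) sounding as a DataFrame; _get_data attaches
# a .units dict mapping column names to unit strings.
df = IAStateUpperAir.request_data(datetime(2017, 9, 10, 0), 'OAX')
print(df[['pressure', 'temperature', 'dewpoint', 'u_wind', 'v_wind']].head())
print(df.units['temperature'])  # 'degC'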
Unidata/siphon
siphon/simplewebservice/iastate.py
IAStateUpperAir._get_data_raw
def _get_data_raw(self, time, site_id, pressure=None):
        r"""Download data from Iowa State's upper air archive.

        Parameters
        ----------
        time : datetime
            Date and time for which data should be downloaded
        site_id : str
            Site id for which data should be downloaded
        pressure : float, optional
            Mandatory pressure level at which to request data (in hPa).

        Returns
        -------
        list of json data

        """
        query = {'ts': time.strftime('%Y%m%d%H00')}
        if site_id is not None:
            query['station'] = site_id
        if pressure is not None:
            query['pressure'] = pressure

        resp = self.get_path('raob.py', query)
        json_data = json.loads(resp.text)

        # See if the return is valid, but has no data
        if not (json_data['profiles'] and json_data['profiles'][0]['profile']):
            message = 'No data available '
            if time is not None:
                message += 'for {time:%Y-%m-%d %HZ} '.format(time=time)
            if site_id is not None:
                message += 'for station {stid} '.format(stid=site_id)
            if pressure is not None:
                message += 'for pressure {pres} '.format(pres=pressure)
            message = message[:-1] + '.'
            raise ValueError(message)

        return json_data
python
def _get_data_raw(self, time, site_id, pressure=None):
        r"""Download data from Iowa State's upper air archive.

        Parameters
        ----------
        time : datetime
            Date and time for which data should be downloaded
        site_id : str
            Site id for which data should be downloaded
        pressure : float, optional
            Mandatory pressure level at which to request data (in hPa).

        Returns
        -------
        list of json data

        """
        query = {'ts': time.strftime('%Y%m%d%H00')}
        if site_id is not None:
            query['station'] = site_id
        if pressure is not None:
            query['pressure'] = pressure

        resp = self.get_path('raob.py', query)
        json_data = json.loads(resp.text)

        # See if the return is valid, but has no data
        if not (json_data['profiles'] and json_data['profiles'][0]['profile']):
            message = 'No data available '
            if time is not None:
                message += 'for {time:%Y-%m-%d %HZ} '.format(time=time)
            if site_id is not None:
                message += 'for station {stid} '.format(stid=site_id)
            if pressure is not None:
                message += 'for pressure {pres} '.format(pres=pressure)
            message = message[:-1] + '.'
            raise ValueError(message)

        return json_data
[ "def", "_get_data_raw", "(", "self", ",", "time", ",", "site_id", ",", "pressure", "=", "None", ")", ":", "query", "=", "{", "'ts'", ":", "time", ".", "strftime", "(", "'%Y%m%d%H00'", ")", "}", "if", "site_id", "is", "not", "None", ":", "query", "[", "'station'", "]", "=", "site_id", "if", "pressure", "is", "not", "None", ":", "query", "[", "'pressure'", "]", "=", "pressure", "resp", "=", "self", ".", "get_path", "(", "'raob.py'", ",", "query", ")", "json_data", "=", "json", ".", "loads", "(", "resp", ".", "text", ")", "# See if the return is valid, but has no data", "if", "not", "(", "json_data", "[", "'profiles'", "]", "and", "json_data", "[", "'profiles'", "]", "[", "0", "]", "[", "'profile'", "]", ")", ":", "message", "=", "'No data available '", "if", "time", "is", "not", "None", ":", "message", "+=", "'for {time:%Y-%m-%d %HZ} '", ".", "format", "(", "time", "=", "time", ")", "if", "site_id", "is", "not", "None", ":", "message", "+=", "'for station {stid}'", ".", "format", "(", "stid", "=", "site_id", ")", "if", "pressure", "is", "not", "None", ":", "message", "+=", "'for pressure {pres}'", ".", "format", "(", "pres", "=", "pressure", ")", "message", "=", "message", "[", ":", "-", "1", "]", "+", "'.'", "raise", "ValueError", "(", "message", ")", "return", "json_data" ]
r"""Download data from the Iowa State's upper air archive. Parameters ---------- time : datetime Date and time for which data should be downloaded site_id : str Site id for which data should be downloaded pressure : float, optional Mandatory pressure level at which to request data (in hPa). Returns ------- list of json data
[ "r", "Download", "data", "from", "the", "Iowa", "State", "s", "upper", "air", "archive", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/simplewebservice/iastate.py#L141-L178
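A stand-alone sketch of the query assembly in `_get_data_raw` above; `build_query` is a hypothetical helper written here only so the parameter handling can be run without a network connection.

from datetime import datetime

def build_query(time, site_id=None, pressure=None):
    # Mirrors the parameter handling in _get_data_raw.
    query = {'ts': time.strftime('%Y%m%d%H00')}
    if site_id is not None:
        query['station'] = site_id
    if pressure is not None:
        query['pressure'] = pressure
    return query

print(build_query(datetime(2017, 9, 10, 0), site_id='OAX', pressure=500))
# {'ts': '201709100000', 'station': 'OAX', 'pressure': 500}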
Unidata/siphon
siphon/radarserver.py
parse_station_table
def parse_station_table(root):
    """Parse station list XML file."""
    stations = [parse_xml_station(elem) for elem in root.findall('station')]
    return {st.id: st for st in stations}
python
def parse_station_table(root):
    """Parse station list XML file."""
    stations = [parse_xml_station(elem) for elem in root.findall('station')]
    return {st.id: st for st in stations}
[ "def", "parse_station_table", "(", "root", ")", ":", "stations", "=", "[", "parse_xml_station", "(", "elem", ")", "for", "elem", "in", "root", ".", "findall", "(", "'station'", ")", "]", "return", "{", "st", ".", "id", ":", "st", "for", "st", "in", "stations", "}" ]
Parse station list XML file.
[ "Parse", "station", "list", "XML", "file", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/radarserver.py#L217-L220
Unidata/siphon
siphon/radarserver.py
parse_xml_station
def parse_xml_station(elem):
    """Create a :class:`Station` instance from an XML tag."""
    stid = elem.attrib['id']
    name = elem.find('name').text
    lat = float(elem.find('latitude').text)
    lon = float(elem.find('longitude').text)
    elev = float(elem.find('elevation').text)
    return Station(id=stid, elevation=elev, latitude=lat, longitude=lon, name=name)
python
def parse_xml_station(elem):
    """Create a :class:`Station` instance from an XML tag."""
    stid = elem.attrib['id']
    name = elem.find('name').text
    lat = float(elem.find('latitude').text)
    lon = float(elem.find('longitude').text)
    elev = float(elem.find('elevation').text)
    return Station(id=stid, elevation=elev, latitude=lat, longitude=lon, name=name)
[ "def", "parse_xml_station", "(", "elem", ")", ":", "stid", "=", "elem", ".", "attrib", "[", "'id'", "]", "name", "=", "elem", ".", "find", "(", "'name'", ")", ".", "text", "lat", "=", "float", "(", "elem", ".", "find", "(", "'latitude'", ")", ".", "text", ")", "lon", "=", "float", "(", "elem", ".", "find", "(", "'longitude'", ")", ".", "text", ")", "elev", "=", "float", "(", "elem", ".", "find", "(", "'elevation'", ")", ".", "text", ")", "return", "Station", "(", "id", "=", "stid", ",", "elevation", "=", "elev", ",", "latitude", "=", "lat", ",", "longitude", "=", "lon", ",", "name", "=", "name", ")" ]
Create a :class:`Station` instance from an XML tag.
[ "Create", "a", ":", "class", ":", "Station", "instance", "from", "an", "XML", "tag", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/radarserver.py#L223-L230
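A runnable sketch of the two parsers above against a hand-written station list. `Station` is defined elsewhere in siphon/radarserver.py; a namedtuple with the same field names is assumed here, and `parse_station_table` / `parse_xml_station` from the snippets above are taken to be in scope. The station values are illustrative.

import xml.etree.ElementTree as ET
from collections import namedtuple

# Assumed stand-in for siphon's Station record type.
Station = namedtuple('Station', ['id', 'elevation', 'latitude', 'longitude', 'name'])

doc = ET.fromstring(
    '<stationList>'
    '  <station id="KFTG">'
    '    <name>Denver/Boulder</name>'
    '    <longitude>-104.55</longitude>'
    '    <latitude>39.79</latitude>'
    '    <elevation>1675.0</elevation>'
    '  </station>'
    '</stationList>')

stations = parse_station_table(doc)
print(stations['KFTG'].name, stations['KFTG'].elevation)  # Denver/Boulder 1675.0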
Unidata/siphon
siphon/radarserver.py
RadarQuery.stations
def stations(self, *stns):
        """Specify one or more stations for the query.

        This modifies the query in-place, but returns `self` so that multiple
        queries can be chained together on one line.

        This replaces any existing spatial queries that have been set.

        Parameters
        ----------
        stns : one or more strings
            One or more station IDs to request data for

        Returns
        -------
        self : RadarQuery
            Returns self for chaining calls

        """
        self._set_query(self.spatial_query, stn=stns)
        return self
python
def stations(self, *stns):
        """Specify one or more stations for the query.

        This modifies the query in-place, but returns `self` so that multiple
        queries can be chained together on one line.

        This replaces any existing spatial queries that have been set.

        Parameters
        ----------
        stns : one or more strings
            One or more station IDs to request data for

        Returns
        -------
        self : RadarQuery
            Returns self for chaining calls

        """
        self._set_query(self.spatial_query, stn=stns)
        return self
[ "def", "stations", "(", "self", ",", "*", "stns", ")", ":", "self", ".", "_set_query", "(", "self", ".", "spatial_query", ",", "stn", "=", "stns", ")", "return", "self" ]
Specify one or more stations for the query.

        This modifies the query in-place, but returns `self` so that multiple
        queries can be chained together on one line.

        This replaces any existing spatial queries that have been set.

        Parameters
        ----------
        stns : one or more strings
            One or more station IDs to request data for

        Returns
        -------
        self : RadarQuery
            Returns self for chaining calls
[ "Specify", "one", "or", "more", "stations", "for", "the", "query", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/radarserver.py#L23-L43
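A chaining sketch for `stations`. `time()` comes from the `DataQuery` base class, so its availability on `RadarQuery` is an assumption based on siphon's shared query interface; the station ids are illustrative.

from datetime import datetime

from siphon.radarserver import RadarQuery

q = RadarQuery().stations('KFTG', 'KCYS').time(datetime(2017, 9, 10, 0))
# The spatial part of the query now carries both station ids.
print(q.spatial_query)  # {'stn': ('KFTG', 'KCYS')}
print(str(q))           # query-string form sent to the server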
Unidata/siphon
siphon/radarserver.py
RadarServer.validate_query
def validate_query(self, query):
        """Validate a query.

        Determines whether `query` is well-formed. This includes checking for all
        required parameters, as well as checking parameters for valid values.

        Parameters
        ----------
        query : RadarQuery
            The query to validate

        Returns
        -------
        valid : bool
            Whether `query` is valid.

        """
        valid = True

        # Make sure all stations are in the table
        if 'stn' in query.spatial_query:
            valid = valid and all(stid in self.stations
                                  for stid in query.spatial_query['stn'])

        if query.var:
            valid = valid and all(var in self.variables for var in query.var)

        return valid
python
def validate_query(self, query):
        """Validate a query.

        Determines whether `query` is well-formed. This includes checking for all
        required parameters, as well as checking parameters for valid values.

        Parameters
        ----------
        query : RadarQuery
            The query to validate

        Returns
        -------
        valid : bool
            Whether `query` is valid.

        """
        valid = True

        # Make sure all stations are in the table
        if 'stn' in query.spatial_query:
            valid = valid and all(stid in self.stations
                                  for stid in query.spatial_query['stn'])

        if query.var:
            valid = valid and all(var in self.variables for var in query.var)

        return valid
[ "def", "validate_query", "(", "self", ",", "query", ")", ":", "valid", "=", "True", "# Make sure all stations are in the table", "if", "'stn'", "in", "query", ".", "spatial_query", ":", "valid", "=", "valid", "and", "all", "(", "stid", "in", "self", ".", "stations", "for", "stid", "in", "query", ".", "spatial_query", "[", "'stn'", "]", ")", "if", "query", ".", "var", ":", "valid", "=", "valid", "and", "all", "(", "var", "in", "self", ".", "variables", "for", "var", "in", "query", ".", "var", ")", "return", "valid" ]
Validate a query.

        Determines whether `query` is well-formed. This includes checking for all
        required parameters, as well as checking parameters for valid values.

        Parameters
        ----------
        query : RadarQuery
            The query to validate

        Returns
        -------
        valid : bool
            Whether `query` is valid.
[ "Validate", "a", "query", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/radarserver.py#L101-L127
Unidata/siphon
siphon/radarserver.py
RadarServer.get_catalog
def get_catalog(self, query):
        """Fetch a parsed THREDDS catalog from the radar server.

        Requests a catalog of radar data files from the radar server given the
        parameters in `query` and returns a :class:`~siphon.catalog.TDSCatalog`
        instance.

        Parameters
        ----------
        query : RadarQuery
            The parameters to send to the radar server

        Returns
        -------
        catalog : TDSCatalog
            The catalog of matching data files

        Raises
        ------
        :class:`~siphon.http_util.BadQueryError`
            When the query cannot be handled by the server

        See Also
        --------
        get_catalog_raw

        """
        # TODO: Refactor TDSCatalog so we don't need two requests, or to do URL munging
        try:
            url = self._base[:-1] if self._base[-1] == '/' else self._base
            url += '?' + str(query)
            return TDSCatalog(url)
        except ET.ParseError:
            raise BadQueryError(self.get_catalog_raw(query))
python
def get_catalog(self, query):
        """Fetch a parsed THREDDS catalog from the radar server.

        Requests a catalog of radar data files from the radar server given the
        parameters in `query` and returns a :class:`~siphon.catalog.TDSCatalog`
        instance.

        Parameters
        ----------
        query : RadarQuery
            The parameters to send to the radar server

        Returns
        -------
        catalog : TDSCatalog
            The catalog of matching data files

        Raises
        ------
        :class:`~siphon.http_util.BadQueryError`
            When the query cannot be handled by the server

        See Also
        --------
        get_catalog_raw

        """
        # TODO: Refactor TDSCatalog so we don't need two requests, or to do URL munging
        try:
            url = self._base[:-1] if self._base[-1] == '/' else self._base
            url += '?' + str(query)
            return TDSCatalog(url)
        except ET.ParseError:
            raise BadQueryError(self.get_catalog_raw(query))
[ "def", "get_catalog", "(", "self", ",", "query", ")", ":", "# TODO: Refactor TDSCatalog so we don't need two requests, or to do URL munging", "try", ":", "url", "=", "self", ".", "_base", "[", ":", "-", "1", "]", "if", "self", ".", "_base", "[", "-", "1", "]", "==", "'/'", "else", "self", ".", "_base", "url", "+=", "'?'", "+", "str", "(", "query", ")", "return", "TDSCatalog", "(", "url", ")", "except", "ET", ".", "ParseError", ":", "raise", "BadQueryError", "(", "self", ".", "get_catalog_raw", "(", "query", ")", ")" ]
Fetch a parsed THREDDS catalog from the radar server.

        Requests a catalog of radar data files from the radar server given the
        parameters in `query` and returns a :class:`~siphon.catalog.TDSCatalog`
        instance.

        Parameters
        ----------
        query : RadarQuery
            The parameters to send to the radar server

        Returns
        -------
        catalog : TDSCatalog
            The catalog of matching data files

        Raises
        ------
        :class:`~siphon.http_util.BadQueryError`
            When the query cannot be handled by the server

        See Also
        --------
        get_catalog_raw
[ "Fetch", "a", "parsed", "THREDDS", "catalog", "from", "the", "radar", "server", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/radarserver.py#L129-L161
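An end-to-end sketch tying the two RadarServer records above together: `validate_query` checks the query against the server's station and variable tables before `get_catalog` fetches the matching catalog (network required). The NEXRAD level II radar-server URL is Unidata's publicly documented endpoint and is assumed reachable; `rs.query()` returning a `RadarQuery` is likewise assumed from siphon's interface.

from datetime import datetime

from siphon.radarserver import RadarServer

rs = RadarServer('http://thredds.ucar.edu/thredds/radarServer/nexrad/level2/IDD/')
query = rs.query().stations('KFTG').time(datetime(2017, 9, 10, 0))

# Only request the catalog if the station (and any variables) are known to the server.
if rs.validate_query(query):
    catalog = rs.get_catalog(query)
    print(list(catalog.datasets))  # names of matching level II volume files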
Unidata/siphon
siphon/simplewebservice/acis.py
acis_request
def acis_request(method, params):
    """Request data from the ACIS Web Services API.

    Makes a request from the ACIS Web Services API for data based on a given
    method (StnMeta, StnData, MultiStnData, GridData, General) and parameters
    string. Information about the parameters can be obtained at:
    http://www.rcc-acis.org/docs_webservices.html

    If a connection to the API fails, then it will raise an exception. Some bad
    calls will also return empty dictionaries.

    ACIS Web Services is a distributed system! A call to the main URL can be
    delivered to any climate center running a public instance of the service.
    This makes the calls efficient, but also occasionally results in failed
    calls when a server you are directed to is having problems. Generally,
    reconnecting after waiting a few seconds will resolve a problem. If
    problems are persistent, contact the ACIS developers at the High Plains
    Regional Climate Center or Northeast Regional Climate Center, who will
    look into server issues.

    Parameters
    ----------
    method : str
        The Web Services request method (StnMeta, StnData, MultiStnData,
        GridData, General)
    params : dict
        A JSON array of parameters (See Web Services API)

    Returns
    -------
    A dictionary of data based on the JSON parameters

    Raises
    ------
    :class:`AcisApiException`
        When the API is unable to establish a connection or returns
        unparsable data.

    """
    base_url = 'http://data.rcc-acis.org/'  # ACIS Web API URL

    timeout = 300 if method == 'MultiStnData' else 60

    try:
        response = session_manager.create_session().post(base_url + method, json=params,
                                                         timeout=timeout)
        return response.json()
    except requests.exceptions.Timeout:
        raise AcisApiException('Connection Timeout')
    except requests.exceptions.TooManyRedirects:
        raise AcisApiException('Bad URL. Check your ACIS connection method string.')
    except ValueError:
        raise AcisApiException('No data returned! The ACIS parameter dictionary '
                               'may be incorrectly formatted')
python
def acis_request(method, params):
    """Request data from the ACIS Web Services API.

    Makes a request from the ACIS Web Services API for data based on a given
    method (StnMeta, StnData, MultiStnData, GridData, General) and parameters
    string. Information about the parameters can be obtained at:
    http://www.rcc-acis.org/docs_webservices.html

    If a connection to the API fails, then it will raise an exception. Some bad
    calls will also return empty dictionaries.

    ACIS Web Services is a distributed system! A call to the main URL can be
    delivered to any climate center running a public instance of the service.
    This makes the calls efficient, but also occasionally results in failed
    calls when a server you are directed to is having problems. Generally,
    reconnecting after waiting a few seconds will resolve a problem. If
    problems are persistent, contact the ACIS developers at the High Plains
    Regional Climate Center or Northeast Regional Climate Center, who will
    look into server issues.

    Parameters
    ----------
    method : str
        The Web Services request method (StnMeta, StnData, MultiStnData,
        GridData, General)
    params : dict
        A JSON array of parameters (See Web Services API)

    Returns
    -------
    A dictionary of data based on the JSON parameters

    Raises
    ------
    :class:`AcisApiException`
        When the API is unable to establish a connection or returns
        unparsable data.

    """
    base_url = 'http://data.rcc-acis.org/'  # ACIS Web API URL

    timeout = 300 if method == 'MultiStnData' else 60

    try:
        response = session_manager.create_session().post(base_url + method, json=params,
                                                         timeout=timeout)
        return response.json()
    except requests.exceptions.Timeout:
        raise AcisApiException('Connection Timeout')
    except requests.exceptions.TooManyRedirects:
        raise AcisApiException('Bad URL. Check your ACIS connection method string.')
    except ValueError:
        raise AcisApiException('No data returned! The ACIS parameter dictionary '
                               'may be incorrectly formatted')
[ "def", "acis_request", "(", "method", ",", "params", ")", ":", "base_url", "=", "'http://data.rcc-acis.org/'", "# ACIS Web API URL", "timeout", "=", "300", "if", "method", "==", "'MultiStnData'", "else", "60", "try", ":", "response", "=", "session_manager", ".", "create_session", "(", ")", ".", "post", "(", "base_url", "+", "method", ",", "json", "=", "params", ",", "timeout", "=", "timeout", ")", "return", "response", ".", "json", "(", ")", "except", "requests", ".", "exceptions", ".", "Timeout", ":", "raise", "AcisApiException", "(", "'Connection Timeout'", ")", "except", "requests", ".", "exceptions", ".", "TooManyRedirects", ":", "raise", "AcisApiException", "(", "'Bad URL. Check your ACIS connection method string.'", ")", "except", "ValueError", ":", "raise", "AcisApiException", "(", "'No data returned! The ACIS parameter dictionary'", "'may be incorrectly formatted'", ")" ]
Request data from the ACIS Web Services API.

    Makes a request from the ACIS Web Services API for data based on a given
    method (StnMeta, StnData, MultiStnData, GridData, General) and parameters
    string. Information about the parameters can be obtained at:
    http://www.rcc-acis.org/docs_webservices.html

    If a connection to the API fails, then it will raise an exception. Some bad
    calls will also return empty dictionaries.

    ACIS Web Services is a distributed system! A call to the main URL can be
    delivered to any climate center running a public instance of the service.
    This makes the calls efficient, but also occasionally results in failed
    calls when a server you are directed to is having problems. Generally,
    reconnecting after waiting a few seconds will resolve a problem. If
    problems are persistent, contact the ACIS developers at the High Plains
    Regional Climate Center or Northeast Regional Climate Center, who will
    look into server issues.

    Parameters
    ----------
    method : str
        The Web Services request method (StnMeta, StnData, MultiStnData,
        GridData, General)
    params : dict
        A JSON array of parameters (See Web Services API)

    Returns
    -------
    A dictionary of data based on the JSON parameters

    Raises
    ------
    :class:`AcisApiException`
        When the API is unable to establish a connection or returns
        unparsable data.
[ "Request", "data", "from", "the", "ACIS", "Web", "Services", "API", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/simplewebservice/acis.py#L11-L63
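A minimal call sketch for `acis_request` (network required). The station id, date range, and element names are illustrative values in the shape the ACIS docs describe, not tested output.

from siphon.simplewebservice.acis import acis_request

params = {'sid': 'KDEN',
          'sdate': '2017-09-01',
          'edate': '2017-09-03',
          'elems': [{'name': 'maxt'}, {'name': 'mint'}]}

data = acis_request('StnData', params)
print(data['meta']['name'])
for row in data['data']:
    print(row)  # e.g. ['2017-09-01', '90', '58']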
Unidata/siphon
siphon/ncss.py
parse_xml
def parse_xml(data, handle_units):
    """Parse XML data returned by NCSS."""
    root = ET.fromstring(data)
    return squish(parse_xml_dataset(root, handle_units))
python
def parse_xml(data, handle_units):
    """Parse XML data returned by NCSS."""
    root = ET.fromstring(data)
    return squish(parse_xml_dataset(root, handle_units))
[ "def", "parse_xml", "(", "data", ",", "handle_units", ")", ":", "root", "=", "ET", ".", "fromstring", "(", "data", ")", "return", "squish", "(", "parse_xml_dataset", "(", "root", ",", "handle_units", ")", ")" ]
Parse XML data returned by NCSS.
[ "Parse", "XML", "data", "returned", "by", "NCSS", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/ncss.py#L315-L318
Unidata/siphon
siphon/ncss.py
parse_xml_point
def parse_xml_point(elem):
    """Parse an XML point tag."""
    point = {}
    units = {}
    for data in elem.findall('data'):
        name = data.get('name')
        unit = data.get('units')
        point[name] = float(data.text) if name != 'date' else parse_iso_date(data.text)
        if unit:
            units[name] = unit
    return point, units
python
def parse_xml_point(elem):
    """Parse an XML point tag."""
    point = {}
    units = {}
    for data in elem.findall('data'):
        name = data.get('name')
        unit = data.get('units')
        point[name] = float(data.text) if name != 'date' else parse_iso_date(data.text)
        if unit:
            units[name] = unit
    return point, units
[ "def", "parse_xml_point", "(", "elem", ")", ":", "point", "=", "{", "}", "units", "=", "{", "}", "for", "data", "in", "elem", ".", "findall", "(", "'data'", ")", ":", "name", "=", "data", ".", "get", "(", "'name'", ")", "unit", "=", "data", ".", "get", "(", "'units'", ")", "point", "[", "name", "]", "=", "float", "(", "data", ".", "text", ")", "if", "name", "!=", "'date'", "else", "parse_iso_date", "(", "data", ".", "text", ")", "if", "unit", ":", "units", "[", "name", "]", "=", "unit", "return", "point", ",", "units" ]
Parse an XML point tag.
[ "Parse", "an", "XML", "point", "tag", "." ]
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/ncss.py#L321-L331
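A stand-alone sketch of the element shape `parse_xml_point` expects, mirroring NCSS's XML point responses; `parse_xml_point` from the snippet above is taken to be in scope (a field named `date` would additionally need siphon's `parse_iso_date`). The data values are illustrative.

import xml.etree.ElementTree as ET

elem = ET.fromstring(
    '<point>'
    '<data name="temperature" units="K">296.5</data>'
    '<data name="lat" units="degrees_north">40.0</data>'
    '</point>')

point, units = parse_xml_point(elem)
print(point)  # {'temperature': 296.5, 'lat': 40.0}
print(units)  # {'temperature': 'K', 'lat': 'degrees_north'}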