def twitter_bootstrap(element, args=""):
    """
    valid layouts are:
        - default
        - search
        - inline
        - horizontal

    {{ form|twitter_bootstrap:"default" }}
    {{ form|twitter_bootstrap:"horizontal" }}
    {{ form|twitter_bootstrap:"horizontal,[xs,sm,md,lg],[1-12],[1-12]" }}
    """
    element_type = element.__class__.__name__.lower()

    args_list = [arg.strip() for arg in args.split(',')]
    layout = (len(args_list) and args_list[0]) or "default"
    size = (len(args_list) > 1 and args_list[1]) or "sm"
    label_cols = (len(args_list) > 2 and args_list[2]) or "2"
    input_cols = (len(args_list) > 3 and args_list[3]) or str(12 - int(label_cols))

    lbl_size_class = "col-%s-%s" % (size, label_cols)
    lbl_size_offset_class = "col-%s-offset-%s" % (size, label_cols)
    ipt_size_class = "col-%s-%s" % (size, input_cols)

    if layout not in ["default", "search", "inline", "horizontal"]:
        layout = "default"

    # Default template; the original left field_template_file unset on the
    # boundfield branch, which raised a NameError below.
    field_template_file = "field.html"
    if element_type != 'boundfield':
        if layout != "default":
            field_template_file = "%s_field.html" % layout

    template = get_template("twitter_bootstrap_form/form.html")
    context = {
        'form': element,
        'layout': layout,
        'lbl_size_class': lbl_size_class,
        'lbl_size_offset_class': lbl_size_offset_class,
        'ipt_size_class': ipt_size_class,
        'required_suffix': settings.BOOTSTRAP_REQUIRED_SUFFIX,
        'field_template': "twitter_bootstrap_form/%s" % field_template_file,
    }
    return template.render(context)
def breakfast(self, message="Breakfast is ready", shout: bool = False):
    """Say something in the morning"""
    return self.helper.output(message, shout)

def lunch(self, message="Time for lunch", shout: bool = False):
    """Say something in the afternoon"""
    return self.helper.output(message, shout)

def dinner(self, message="Dinner is served", shout: bool = False):
    """Say something in the evening"""
    return self.helper.output(message, shout)
def main():
    """Command line entrypoint to reduce technote metadata."""
    parser = argparse.ArgumentParser(
        description='Discover and ingest metadata from document sources, '
                    'including lsstdoc-based LaTeX documents and '
                    'reStructuredText-based technotes. Metadata can be '
                    'upserted into the LSST Projectmeta MongoDB.')
    parser.add_argument(
        '--ltd-product',
        dest='ltd_product_url',
        help='URL of an LSST the Docs product '
             '(https://keeper.lsst.codes/products/<slug>). If provided, '
             'only this document will be ingested.')
    parser.add_argument(
        '--github-token',
        help='GitHub personal access token.')
    parser.add_argument(
        '--mongodb-uri',
        help='MongoDB connection URI. If provided, metadata will be loaded '
             'into the Projectmeta database. Omit this argument to just '
             'test the ingest pipeline.')
    parser.add_argument(
        '--mongodb-db',
        default='lsstprojectmeta',
        help='Name of MongoDB database')
    parser.add_argument(
        '--mongodb-collection',
        default='resources',
        help='Name of the MongoDB collection for projectmeta resources')
    args = parser.parse_args()

    # Configure the root logger
    stream_handler = logging.StreamHandler()
    stream_formatter = logging.Formatter(
        '%(asctime)s %(levelname)8s %(name)s | %(message)s')
    stream_handler.setFormatter(stream_formatter)
    root_logger = logging.getLogger()
    root_logger.addHandler(stream_handler)
    root_logger.setLevel(logging.WARNING)
    # Configure app logger
    app_logger = logging.getLogger('lsstprojectmeta')
    app_logger.setLevel(logging.DEBUG)

    if args.mongodb_uri is not None:
        mongo_client = AsyncIOMotorClient(args.mongodb_uri, ssl=True)
        collection = mongo_client[args.mongodb_db][args.mongodb_collection]
    else:
        collection = None

    loop = asyncio.get_event_loop()

    if args.ltd_product_url is not None:
        # Run single technote
        loop.run_until_complete(run_single_ltd_doc(args.ltd_product_url,
                                                   args.github_token,
                                                   collection))
    else:
        # Run bulk technote processing
        loop.run_until_complete(run_bulk_etl(args.github_token,
                                             collection))
async def process_ltd_doc_products(session, product_urls, github_api_token,
                                   mongo_collection=None):
    """Run a pipeline to extract, transform, and load metadata for
    multiple LSST the Docs-hosted projects.

    Parameters
    ----------
    session : `aiohttp.ClientSession`
        Your application's aiohttp client session.
        See http://aiohttp.readthedocs.io/en/stable/client.html.
    product_urls : `list` of `str`
        List of LSST the Docs product URLs.
    github_api_token : `str`
        A GitHub personal API token. See the `GitHub personal access
        token guide`_.
    mongo_collection : `motor.motor_asyncio.AsyncIOMotorCollection`, optional
        MongoDB collection. This should be the common MongoDB collection for
        LSST projectmeta JSON-LD records.
    """
    tasks = [asyncio.ensure_future(
             process_ltd_doc(session, github_api_token,
                             product_url,
                             mongo_collection=mongo_collection))
             for product_url in product_urls]
    await asyncio.gather(*tasks)
async def process_ltd_doc(session, github_api_token, ltd_product_url,
                          mongo_collection=None):
    """Ingest any kind of LSST document hosted on LSST the Docs from its
    source.

    Parameters
    ----------
    session : `aiohttp.ClientSession`
        Your application's aiohttp client session.
        See http://aiohttp.readthedocs.io/en/stable/client.html.
    github_api_token : `str`
        A GitHub personal API token. See the `GitHub personal access
        token guide`_.
    ltd_product_url : `str`
        URL of the technote's product resource in the LTD Keeper API.
    mongo_collection : `motor.motor_asyncio.AsyncIOMotorCollection`, optional
        MongoDB collection. This should be the common MongoDB collection for
        LSST projectmeta JSON-LD records. If provided, this JSON-LD is
        upserted into the MongoDB collection.

    Returns
    -------
    metadata : `dict`
        JSON-LD-formatted dictionary.

    .. _GitHub personal access token guide: https://ls.st/41d
    """
    logger = logging.getLogger(__name__)

    ltd_product_data = await get_ltd_product(session, url=ltd_product_url)

    # Ensure the LTD product is a document
    product_name = ltd_product_data['slug']
    doc_handle_match = DOCUMENT_HANDLE_PATTERN.match(product_name)
    if doc_handle_match is None:
        logger.debug('%s is not a document repo', product_name)
        return

    # Figure out the format of the document by probing for metadata files.
    # reStructuredText-based Sphinx documents have a metadata.yaml file.
    try:
        return await process_sphinx_technote(
            session, github_api_token, ltd_product_data,
            mongo_collection=mongo_collection)
    except NotSphinxTechnoteError:
        # Catch error so we can try the next format
        logger.debug('%s is not a Sphinx-based technote.', product_name)
    except Exception:
        # Something bad happened trying to process the technote.
        # Log and just move on.
        logger.exception('Unexpected error trying to process %s',
                         product_name)
        return

    # Try interpreting it as a Lander page with a /metadata.jsonld document
    try:
        return await process_lander_page(
            session, github_api_token, ltd_product_data,
            mongo_collection=mongo_collection)
    except NotLanderPageError:
        # Catch error so we can try the next format
        logger.debug('%s is not a Lander page with a metadata.jsonld file.',
                     product_name)
    except Exception:
        # Something bad happened; log and move on
        logger.exception('Unexpected error trying to process %s',
                         product_name)
        return
def decorator(decorator_func):
    """Allows a decorator to be called with or without keyword arguments."""
    assert callable(decorator_func), type(decorator_func)

    def _decorator(func=None, **kwargs):
        assert func is None or callable(func), type(func)
        if func:
            return decorator_func(func, **kwargs)
        else:
            def _decorator_helper(func):
                return decorator_func(func, **kwargs)

            return _decorator_helper

    return _decorator
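A minimal usage sketch for the `decorator` helper above; the names `repeat`, `greet`, and `shout` are hypothetical, not from the original source. It shows that the wrapped decorator accepts both the bare form and the keyword-argument form:

@decorator
def repeat(func, times=2):
    def wrapper(*args, **kwargs):
        return [func(*args, **kwargs) for _ in range(times)]
    return wrapper

@repeat            # bare form: decorator_func is called with times=2
def greet():
    return "hi"

@repeat(times=3)   # keyword form: _decorator_helper closes over times=3
def shout():
    return "HI"

assert greet() == ["hi", "hi"]
assert shout() == ["HI", "HI", "HI"]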
def get_installation_token(installation_id, integration_jwt):
    """Create a GitHub token for an integration installation.

    Parameters
    ----------
    installation_id : `int`
        Installation ID. This is available in the URL of the integration's
        **installation** page.
    integration_jwt : `bytes`
        The integration's JSON Web Token (JWT). You can create this with
        `create_jwt`.

    Returns
    -------
    token_obj : `dict`
        GitHub token object. Includes the fields:

        - ``token``: the token string itself.
        - ``expires_at``: date time string when the token expires.

    Example
    -------
    The typical workflow for authenticating to an integration installation
    is:

    .. code-block:: python

       from dochubadapter.github import auth
       jwt = auth.create_jwt(integration_id, private_key_path)
       token_obj = auth.get_installation_token(installation_id, jwt)
       print(token_obj['token'])

    Notes
    -----
    See
    https://developer.github.com/early-access/integrations/authentication/#as-an-installation
    for more information.
    """
    api_root = 'https://api.github.com'
    # the format keyword must match the {root} placeholder; the original
    # passed api_root= and raised a KeyError
    url = '{root}/installations/{id_:d}/access_tokens'.format(
        root=api_root,
        id_=installation_id)
    headers = {
        'Authorization': 'Bearer {0}'.format(integration_jwt.decode('utf-8')),
        'Accept': 'application/vnd.github.machine-man-preview+json'
    }
    resp = requests.post(url, headers=headers)
    resp.raise_for_status()
    return resp.json()
def create_jwt(integration_id, private_key_path):
    """Create a JSON Web Token to authenticate a GitHub Integration or
    installation.

    Parameters
    ----------
    integration_id : `int`
        Integration ID. This is available from the GitHub integration's
        homepage.
    private_key_path : `str`
        Path to the integration's private key (a ``.pem`` file).

    Returns
    -------
    jwt : `bytes`
        JSON Web Token that is good for 9 minutes.

    Notes
    -----
    The JWT is encoded with the RS256 algorithm. It includes a payload with
    fields:

    - ``'iat'``: The current time, as an `int` timestamp.
    - ``'exp'``: Expiration time, as an `int` timestamp. The expiration
      time is set to 9 minutes in the future (the maximum allowance is
      10 minutes).
    - ``'iss'``: The integration ID (`int`).

    For more information, see
    https://developer.github.com/early-access/integrations/authentication/.
    """
    integration_id = int(integration_id)

    with open(private_key_path, 'rb') as f:
        cert_bytes = f.read()

    now = datetime.datetime.now()
    expiration_time = now + datetime.timedelta(minutes=9)
    payload = {
        # Issued at time
        'iat': int(now.timestamp()),
        # JWT expiration time (10 minute maximum)
        'exp': int(expiration_time.timestamp()),
        # Integration's GitHub identifier
        'iss': integration_id
    }
    return jwt.encode(payload, cert_bytes, algorithm='RS256')
def get_macros(tex_source):
    r"""Get all macro definitions from TeX source, supporting multiple
    declaration patterns.

    Parameters
    ----------
    tex_source : `str`
        TeX source content.

    Returns
    -------
    macros : `dict`
        Keys are macro names (including leading ``\``) and values are the
        content (as `str`) of the macros.

    Notes
    -----
    This function uses the following functions to scrape macros of
    different types:

    - `get_def_macros`
    - `get_newcommand_macros`

    This macro scraping has the following caveats:

    - Macro definitions (including content) must all occur on one line.
    - Macros with arguments are not supported.
    """
    macros = {}
    macros.update(get_def_macros(tex_source))
    macros.update(get_newcommand_macros(tex_source))
    return macros
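An illustrative call, subject to the one-definition-per-line caveat above; the exact whitespace accepted depends on the module's DEF_PATTERN regex, so the input below uses the tightest common form:

tex = (r"\def\product{LSST Data Management}" + "\n"
       r"\newcommand{\project}{Data Butler}")
get_macros(tex)
# roughly: {'\\product': 'LSST Data Management',
#           '\\project': 'Data Butler'}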
def get_def_macros(tex_source):
    r"""Get all ``\def`` macro definitions from TeX source.

    Parameters
    ----------
    tex_source : `str`
        TeX source content.

    Returns
    -------
    macros : `dict`
        Keys are macro names (including leading ``\``) and values are the
        content (as `str`) of the macros.

    Notes
    -----
    ``\def`` macros with arguments are not supported.
    """
    macros = {}
    for match in DEF_PATTERN.finditer(tex_source):
        macros[match.group('name')] = match.group('content')
    return macros
def get_newcommand_macros(tex_source):
    r"""Get all ``\newcommand`` macro definitions from TeX source.

    Parameters
    ----------
    tex_source : `str`
        TeX source content.

    Returns
    -------
    macros : `dict`
        Keys are macro names (including leading ``\``) and values are the
        content (as `str`) of the macros.

    Notes
    -----
    ``\newcommand`` macros with arguments are not supported.
    """
    macros = {}
    command = LatexCommand(
        'newcommand',
        {'name': 'name', 'required': True, 'bracket': '{'},
        {'name': 'content', 'required': True, 'bracket': '{'})
    for macro in command.parse(tex_source):
        macros[macro['name']] = macro['content']
    return macros
def load(directory_name, module_name):
    """Try to load and return a module

    Adds DIRECTORY_NAME to sys.path and tries to import MODULE_NAME.

    For example:
        load("~/.yaz", "yaz_extension")
    """
    directory_name = os.path.expanduser(directory_name)
    if os.path.isdir(directory_name) and directory_name not in sys.path:
        sys.path.append(directory_name)

    try:
        return importlib.import_module(module_name)
    except ImportError:
        pass
def make_aware(value, timezone):
    """
    Makes a naive datetime.datetime in a given time zone aware.
    """
    if hasattr(timezone, 'localize') and \
            value not in (datetime.datetime.min, datetime.datetime.max):
        # available for pytz time zones
        return timezone.localize(value, is_dst=None)
    else:
        # may be wrong around DST changes
        return value.replace(tzinfo=timezone)
def make_naive(value, timezone):
    """
    Makes an aware datetime.datetime naive in a given time zone.
    """
    value = value.astimezone(timezone)
    if hasattr(timezone, 'normalize'):
        # available for pytz time zones
        value = timezone.normalize(value)
    return value.replace(tzinfo=None)
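A sketch of round-tripping a naive datetime through the two helpers above, assuming pytz is installed:

import datetime
import pytz

eastern = pytz.timezone('US/Eastern')
naive = datetime.datetime(2017, 6, 1, 12, 0)
aware = make_aware(naive, eastern)        # uses eastern.localize(), DST-aware
assert make_naive(aware, eastern) == naive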
def from_element(root, timezone):
    """Return a Schedule object based on an lxml Element for the <schedule>
    tag. timezone is a tzinfo object, ideally from pytz."""
    assert root.tag == 'schedule'
    if root.xpath('intervals'):
        return _ScheduleIntervals(root, timezone)
    elif root.xpath('recurring_schedules'):
        return _ScheduleRecurring(root, timezone)
    raise NotImplementedError
def to_timezone(self, dt):
    """Converts a datetime to the timezone of this Schedule."""
    if timezone.is_aware(dt):
        return dt.astimezone(self.timezone)
    else:
        return timezone.make_aware(dt, self.timezone)
def intervals(self, range_start=datetime.datetime.min,
              range_end=datetime.datetime.max):
    """Returns a list of tuples of start/end datetimes for when the
    schedule is active during the provided range."""
    raise NotImplementedError
def next_interval(self, after=None):
    """Returns the next Period this event is in effect, or None if the
    event has no remaining periods."""
    if after is None:
        after = timezone.now()
    after = self.to_timezone(after)
    return next(self.intervals(range_start=after), None)
def includes(self, query):
    """Does this schedule include the provided time?
    query should be a datetime (naive or timezone-aware)"""
    query = self.to_timezone(query)
    return any(self.intervals(range_start=query, range_end=query))
def exceptions(self):
    """A dict of dates -> [Period time tuples] representing exceptions
    to the base recurrence pattern."""
    ex = {}
    for sd in self.root.xpath('exceptions/exception'):
        bits = str(sd.text).split(' ')
        date = text_to_date(bits.pop(0))
        ex.setdefault(date, []).extend([
            _time_text_to_period(t) for t in bits
        ])
    return ex
def exception_periods(self, range_start=datetime.date.min,
                      range_end=datetime.date.max):
    """Returns a list of Period tuples for each period represented in an
    <exception> that falls between range_start and range_end."""
    periods = []
    for exception_date, exception_times in self.exceptions.items():
        if exception_date >= range_start and exception_date <= range_end:
            for exception_time in exception_times:
                periods.append(
                    Period(
                        self.timezone.localize(
                            datetime.datetime.combine(exception_date,
                                                      exception_time.start)),
                        self.timezone.localize(
                            datetime.datetime.combine(exception_date,
                                                      exception_time.end))
                    )
                )
    periods.sort()
    return periods
def includes(self, query):
    """Does this schedule include the provided time?
    query should be a datetime (naive or timezone-aware)"""
    query = self.to_timezone(query)
    query_date = query.date()
    query_time = query.time()

    # Is the provided time an exception for this schedule?
    specific = self.exceptions.get(query_date)
    if specific is not None:
        if len(specific) == 0:
            # Not in effect on this day
            return False
        for period in specific:
            if query_time >= period.start and query_time <= period.end:
                return True
        return False

    # It's not an exception. Is it within a recurring schedule?
    return any(sched.includes(query_date, query_time)
               for sched in self._recurring_schedules)
def _daily_periods(self, range_start, range_end):
    """Returns an iterator of Period tuples for every day this event is in
    effect, between range_start and range_end."""
    specific = set(self.exceptions.keys())

    return heapq.merge(self.exception_periods(range_start, range_end), *[
        sched.daily_periods(range_start=range_start, range_end=range_end,
                            exclude_dates=specific)
        for sched in self._recurring_schedules
    ])
def intervals(self, range_start=datetime.datetime.min,
              range_end=datetime.datetime.max):
    """Returns an iterator of Period tuples for continuous stretches of
    time during which this event is in effect, between range_start and
    range_end."""

    # At the moment the algorithm works on periods split by calendar day,
    # one at a time, merging them if they're continuous; to avoid looping
    # infinitely for infinitely long periods, it splits periods as soon as
    # they reach 60 days. This algorithm could likely be improved to get
    # rid of this restriction and improve efficiency, so code should not
    # rely on this behaviour.
    current_period = None
    max_continuous_days = 60

    range_start = self.to_timezone(range_start)
    range_end = self.to_timezone(range_end)

    for period in self._daily_periods(range_start.date(), range_end.date()):
        if period.end < range_start or period.start > range_end:
            continue
        if current_period is None:
            current_period = period
        else:
            if (((period.start < current_period.end)
                    or (period.start - current_period.end) <= datetime.timedelta(minutes=1))
                    and (current_period.end - current_period.start) < datetime.timedelta(days=max_continuous_days)):
                # Merge
                current_period = Period(current_period.start, period.end)
            else:
                yield current_period
                current_period = period

    if current_period:
        yield current_period
def includes(self, query_date, query_time=None):
    """Does this schedule include the provided time?
    query_date and query_time are date and time objects, interpreted
    in this schedule's timezone"""
    if self.start_date and query_date < self.start_date:
        return False
    if self.end_date and query_date > self.end_date:
        return False
    if query_date.weekday() not in self.weekdays:
        return False

    if not query_time:
        return True

    if query_time >= self.period.start and query_time <= self.period.end:
        return True

    return False
def daily_periods(self, range_start=datetime.date.min,
                  range_end=datetime.date.max, exclude_dates=tuple()):
    """Returns an iterator of Period tuples for every day this schedule
    is in effect, between range_start and range_end."""
    tz = self.timezone
    period = self.period
    weekdays = self.weekdays

    current_date = max(range_start, self.start_date)
    end_date = range_end
    if self.end_date:
        end_date = min(end_date, self.end_date)

    while current_date <= end_date:
        if (current_date.weekday() in weekdays
                and current_date not in exclude_dates):
            yield Period(
                tz.localize(datetime.datetime.combine(current_date,
                                                      period.start)),
                tz.localize(datetime.datetime.combine(current_date,
                                                      period.end))
            )
        current_date += datetime.timedelta(days=1)
def period(self):
    """A Period tuple representing the daily start and end time."""
    start_time = self.root.findtext('daily_start_time')
    if start_time:
        return Period(text_to_time(start_time),
                      text_to_time(self.root.findtext('daily_end_time')))
    return Period(datetime.time(0, 0), datetime.time(23, 59))
def weekdays(self):
    """A set of integers representing the weekdays the schedule recurs on,
    with Monday = 0 and Sunday = 6."""
    if not self.root.xpath('days'):
        return set(range(7))
    return set(int(d) - 1 for d in self.root.xpath('days/day/text()'))
def temp_db(db, name=None):
    """
    A context manager that creates a temporary database.
    Useful for automated tests.

    Parameters
    ----------
    db: object
        a preconfigured DB object
    name: str, optional
        name of the database to be created. (default: globally unique name)
    """
    if name is None:
        name = temp_name()
    db.create(name)
    if not db.exists(name):
        raise DatabaseError('failed to create database %s!' % name)
    try:
        yield name
    finally:
        db.drop(name)
        if db.exists(name):
            raise DatabaseError('failed to drop database %s!' % name)
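A usage sketch, assuming temp_db is decorated with contextlib.contextmanager (decorators do not survive in this extract) and that `db` implements create/exists/drop as the docstring describes; the table-creation call is a hypothetical test step:

with temp_db(db) as name:
    db.create_table(name, 'measurements')   # hypothetical test setup
# the database is dropped on exit, even if the block raises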
async def _download_text(url, session):
    """Asynchronously request a URL and get the encoded text content of
    the body.

    Parameters
    ----------
    url : `str`
        URL to download.
    session : `aiohttp.ClientSession`
        An open aiohttp session.

    Returns
    -------
    content : `str`
        Content downloaded from the URL.
    """
    logger = logging.getLogger(__name__)
    async with session.get(url) as response:
        # aiohttp decodes the content to a Python string
        logger.info('Downloading %r', url)
        return await response.text()
async def _download_lsst_bibtex(bibtex_names):
    """Asynchronously download a set of lsst-texmf BibTeX bibliographies
    from GitHub.

    Parameters
    ----------
    bibtex_names : sequence of `str`
        Names of lsst-texmf BibTeX files to download. For example:

        .. code-block:: python

           ['lsst', 'lsst-dm', 'refs', 'books', 'refs_ads']

    Returns
    -------
    bibtexs : `list` of `str`
        List of BibTeX file content, in the same order as ``bibtex_names``.
    """
    blob_url_template = (
        'https://raw.githubusercontent.com/lsst/lsst-texmf/master/texmf/'
        'bibtex/bib/{name}.bib'
    )
    urls = [blob_url_template.format(name=name) for name in bibtex_names]

    tasks = []
    async with ClientSession() as session:
        for url in urls:
            task = asyncio.ensure_future(_download_text(url, session))
            tasks.append(task)

        return await asyncio.gather(*tasks)
def get_lsst_bibtex(bibtex_filenames=None):
    """Get content of lsst-texmf bibliographies.

    BibTeX content is downloaded from GitHub (``master`` branch of
    https://github.com/lsst/lsst-texmf) or retrieved from an in-memory
    cache.

    Parameters
    ----------
    bibtex_filenames : sequence of `str`, optional
        List of lsst-texmf BibTeX files to retrieve. These can be the
        filenames of lsst-bibtex files (for example,
        ``['lsst.bib', 'lsst-dm.bib']``) or names without an extension
        (``['lsst', 'lsst-dm']``). The default (recommended) is to get
        *all* lsst-texmf bibliographies:

        .. code-block:: python

           ['lsst', 'lsst-dm', 'refs', 'books', 'refs_ads']

    Returns
    -------
    bibtex : `dict`
        Dictionary with keys that are bibtex file names (such as
        ``'lsst'``, ``'lsst-dm'``). Values are the corresponding bibtex
        file content (`str`).
    """
    logger = logging.getLogger(__name__)

    if bibtex_filenames is None:
        # Default lsst-texmf bibliography files
        bibtex_names = KNOWN_LSSTTEXMF_BIB_NAMES
    else:
        # Sanitize filenames (remove extensions, path)
        bibtex_names = []
        for filename in bibtex_filenames:
            name = os.path.basename(os.path.splitext(filename)[0])
            if name not in KNOWN_LSSTTEXMF_BIB_NAMES:
                logger.warning('%r is not a known lsst-texmf bib file',
                               name)
                continue
            bibtex_names.append(name)

    # names of bibtex files not in cache
    uncached_names = [name for name in bibtex_names
                      if name not in _LSSTTEXMF_BIB_CACHE]
    if len(uncached_names) > 0:
        # Download bibtex and put into the cache. The downloaded texts
        # correspond to uncached_names (the original zipped against
        # bibtex_names, which mismatches when the cache is partially
        # populated).
        loop = asyncio.get_event_loop()
        future = asyncio.ensure_future(_download_lsst_bibtex(uncached_names))
        loop.run_until_complete(future)
        for name, text in zip(uncached_names, future.result()):
            _LSSTTEXMF_BIB_CACHE[name] = text

    return {name: _LSSTTEXMF_BIB_CACHE[name] for name in bibtex_names}
def get_bibliography(lsst_bib_names=None, bibtex=None):
    """Make a pybtex BibliographyData instance from standard lsst-texmf
    bibliography files and user-supplied bibtex content.

    Parameters
    ----------
    lsst_bib_names : sequence of `str`, optional
        Names of lsst-texmf BibTeX files to include. For example:

        .. code-block:: python

           ['lsst', 'lsst-dm', 'refs', 'books', 'refs_ads']

        Default is `None`, which includes all lsst-texmf bibtex files.
    bibtex : `str`
        BibTeX source content not included in lsst-texmf. This can be
        content from a ``local.bib`` file.

    Returns
    -------
    bibliography : `pybtex.database.BibliographyData`
        A pybtex bibliography database that includes all given sources:
        lsst-texmf bibliographies and ``bibtex``.
    """
    bibtex_data = get_lsst_bibtex(bibtex_filenames=lsst_bib_names)

    # Parse with pybtex into BibliographyData instances
    pybtex_data = [pybtex.database.parse_string(_bibtex, 'bibtex')
                   for _bibtex in bibtex_data.values()]

    # Also parse local bibtex content
    if bibtex is not None:
        pybtex_data.append(pybtex.database.parse_string(bibtex, 'bibtex'))

    # Merge BibliographyData
    bib = pybtex_data[0]
    if len(pybtex_data) > 1:
        for other_bib in pybtex_data[1:]:
            for key, entry in other_bib.entries.items():
                bib.add_entry(key, entry)

    return bib
def get_url_from_entry(entry):
    """Get a usable URL from a pybtex entry.

    Parameters
    ----------
    entry : `pybtex.database.Entry`
        A pybtex bibliography entry.

    Returns
    -------
    url : `str`
        Best available URL from the ``entry``.

    Raises
    ------
    NoEntryUrlError
        Raised when no URL can be made from the bibliography entry.

    Notes
    -----
    The order of priority is:

    1. ``url`` field
    2. ``ls.st`` URL from the handle for ``@docushare`` entries.
    3. ``adsurl``
    4. DOI
    """
    if 'url' in entry.fields:
        return entry.fields['url']
    elif entry.type.lower() == 'docushare':
        return 'https://ls.st/' + entry.fields['handle']
    elif 'adsurl' in entry.fields:
        return entry.fields['adsurl']
    elif 'doi' in entry.fields:
        return 'https://doi.org/' + entry.fields['doi']
    else:
        raise NoEntryUrlError()
def get_authoryear_from_entry(entry, paren=False):
    """Get and format author-year text from a pybtex entry to emulate
    natbib citations.

    Parameters
    ----------
    entry : `pybtex.database.Entry`
        A pybtex bibliography entry.
    paren : `bool`, optional
        Whether to add parentheses around the year. Default is `False`.

    Returns
    -------
    authoryear : `str`
        The author-year citation text.
    """
    def _format_last(person):
        """Reformat a pybtex Person into a last name.

        Joins all parts of a last name and strips "{}" wrappers.
        """
        return ' '.join([n.strip('{}') for n in person.last_names])

    if len(entry.persons['author']) > 0:
        # Grab author list
        persons = entry.persons['author']
    elif len(entry.persons['editor']) > 0:
        # Grab editor list
        persons = entry.persons['editor']
    else:
        raise AuthorYearError

    try:
        year = entry.fields['year']
    except KeyError:
        raise AuthorYearError

    if paren and len(persons) == 1:
        template = '{author} ({year})'
        return template.format(author=_format_last(persons[0]),
                               year=year)
    elif not paren and len(persons) == 1:
        template = '{author} {year}'
        return template.format(author=_format_last(persons[0]),
                               year=year)
    elif paren and len(persons) == 2:
        template = '{author1} and {author2} ({year})'
        return template.format(author1=_format_last(persons[0]),
                               author2=_format_last(persons[1]),
                               year=year)
    elif not paren and len(persons) == 2:
        template = '{author1} and {author2} {year}'
        return template.format(author1=_format_last(persons[0]),
                               author2=_format_last(persons[1]),
                               year=year)
    elif not paren and len(persons) > 2:
        template = '{author} et al {year}'
        return template.format(author=_format_last(persons[0]),
                               year=year)
    elif paren and len(persons) > 2:
        template = '{author} et al ({year})'
        return template.format(author=_format_last(persons[0]),
                               year=year)
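An illustrative round trip with pybtex; the entry content is invented for the example:

import pybtex.database

bib = pybtex.database.parse_string(
    '@article{key, author = {Ivezic, Z. and Jones, R.}, year = {2019}}',
    'bibtex')
entry = bib.entries['key']
get_authoryear_from_entry(entry)               # 'Ivezic and Jones 2019'
get_authoryear_from_entry(entry, paren=True)   # 'Ivezic and Jones (2019)'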
async def process_sphinx_technote(session, github_api_token,
                                  ltd_product_data, mongo_collection=None):
    """Extract, transform, and load Sphinx-based technote metadata.

    Parameters
    ----------
    session : `aiohttp.ClientSession`
        Your application's aiohttp client session.
        See http://aiohttp.readthedocs.io/en/stable/client.html.
    github_api_token : `str`
        A GitHub personal API token. See the `GitHub personal access
        token guide`_.
    ltd_product_data : `dict`
        Data for this technote from the LTD Keeper API
        (``GET /products/<slug>``). Usually obtained via
        `lsstprojectmeta.ltd.get_ltd_product`.
    mongo_collection : `motor.motor_asyncio.AsyncIOMotorCollection`, optional
        MongoDB collection. This should be the common MongoDB collection for
        LSST projectmeta JSON-LD records. If provided, this JSON-LD is
        upserted into the MongoDB collection.

    Returns
    -------
    metadata : `dict`
        JSON-LD-formatted dictionary.

    Raises
    ------
    NotSphinxTechnoteError
        Raised when the LTD product cannot be interpreted as a Sphinx-based
        technote project because it's missing a metadata.yaml file in its
        GitHub repository. This implies that the LTD product *could* be of
        a different format.

    .. _GitHub personal access token guide: https://ls.st/41d
    """
    logger = logging.getLogger(__name__)

    github_url = ltd_product_data['doc_repo']
    github_url = normalize_repo_root_url(github_url)
    repo_slug = parse_repo_slug_from_url(github_url)

    try:
        metadata_yaml = await download_metadata_yaml(session, github_url)
    except aiohttp.ClientResponseError as err:
        # metadata.yaml not found; probably not a Sphinx technote
        logger.debug('Tried to download %s\'s metadata.yaml, got status %d',
                     ltd_product_data['slug'], err.code)
        raise NotSphinxTechnoteError()

    # Extract data from the GitHub API
    github_query = GitHubQuery.load('technote_repo')
    github_variables = {
        "orgName": repo_slug.owner,
        "repoName": repo_slug.repo
    }
    github_data = await github_request(session, github_api_token,
                                       query=github_query,
                                       variables=github_variables)

    try:
        jsonld = reduce_technote_metadata(
            github_url, metadata_yaml, github_data, ltd_product_data)
    except Exception:
        # logger.exception already records the traceback; the message takes
        # a single %s argument
        logger.exception('Issue building JSON-LD for technote %s',
                         github_url)
        raise

    if mongo_collection is not None:
        await _upload_to_mongodb(mongo_collection, jsonld)
        logger.info('Ingested technote %s into MongoDB', github_url)

    return jsonld
def reduce_technote_metadata(github_url, metadata, github_data,
                             ltd_product_data):
    """Reduce a technote project's metadata from multiple sources into a
    single JSON-LD resource.

    Parameters
    ----------
    github_url : `str`
        URL of the technote's GitHub repository.
    metadata : `dict`
        The parsed contents of ``metadata.yaml`` found in a technote's
        repository.
    github_data : `dict`
        The contents of the ``technote_repo`` GitHub GraphQL API query.
    ltd_product_data : `dict`
        JSON dataset for the technote corresponding to the
        ``/products/<product>`` of LTD Keeper.

    Returns
    -------
    metadata : `dict`
        JSON-LD-formatted dictionary.
    """
    repo_slug = parse_repo_slug_from_url(github_url)

    # Initialize a schema.org/Report and schema.org/SoftwareSourceCode
    # linked data resource
    jsonld = {
        '@context': [
            "https://raw.githubusercontent.com/codemeta/codemeta/2.0-rc/"
            "codemeta.jsonld",
            "http://schema.org"],
        '@type': ['Report', 'SoftwareSourceCode'],
        'codeRepository': github_url
    }

    if 'url' in metadata:
        url = metadata['url']
    elif 'published_url' in ltd_product_data:
        url = ltd_product_data['published_url']
    else:
        raise RuntimeError('No identifying url could be found: '
                           '{}'.format(github_url))
    jsonld['@id'] = url
    jsonld['url'] = url

    if 'series' in metadata and 'serial_number' in metadata:
        jsonld['reportNumber'] = '{series}-{serial_number}'.format(**metadata)
    else:
        raise RuntimeError('No reportNumber: {}'.format(github_url))

    if 'doc_title' in metadata:
        jsonld['name'] = metadata['doc_title']

    if 'description' in metadata:
        jsonld['description'] = metadata['description']

    if 'authors' in metadata:
        jsonld['author'] = [{'@type': 'Person', 'name': author_name}
                            for author_name in metadata['authors']]

    if 'last_revised' in metadata:
        # Prefer getting the 'last_revised' date from metadata.yaml
        # since it's considered an override.
        jsonld['dateModified'] = datetime.datetime.strptime(
            metadata['last_revised'],
            '%Y-%m-%d')
    else:
        # Fallback to parsing the date of the last commit to the
        # default branch on GitHub (usually `master`).
        try:
            _repo_data = github_data['data']['repository']
            _master_data = _repo_data['defaultBranchRef']
            jsonld['dateModified'] = datetime.datetime.strptime(
                _master_data['target']['committedDate'],
                '%Y-%m-%dT%H:%M:%SZ')
        except KeyError:
            pass

    try:
        _license_data = github_data['data']['repository']['licenseInfo']
        _spdxId = _license_data['spdxId']
        if _spdxId is not None:
            _spdx_url = 'https://spdx.org/licenses/{}.html'.format(_spdxId)
            jsonld['license'] = _spdx_url
    except KeyError:
        pass

    try:
        # Find the README(|.md|.rst|*) file in the repo root
        _master_data = github_data['data']['repository']['defaultBranchRef']
        _files = _master_data['target']['tree']['entries']
        for _node in _files:
            filename = _node['name']
            normalized_filename = filename.lower()
            if normalized_filename.startswith('readme'):
                readme_url = make_raw_content_url(repo_slug, 'master',
                                                  filename)
                jsonld['readme'] = readme_url
                break
    except KeyError:
        pass

    # Assume Travis is the CI service (always true at the moment)
    travis_url = 'https://travis-ci.org/{}'.format(repo_slug.full)
    jsonld['contIntegration'] = travis_url

    return jsonld
async def download_metadata_yaml(session, github_url):
    """Download the metadata.yaml file from a technote's GitHub
    repository."""
    metadata_yaml_url = _build_metadata_yaml_url(github_url)
    async with session.get(metadata_yaml_url) as response:
        response.raise_for_status()
        yaml_data = await response.text()
    return yaml.safe_load(yaml_data)
def parse_repo_slug_from_url(github_url):
    """Get the slug, <owner>/<repo_name>, for a GitHub repository from
    its URL.

    Parameters
    ----------
    github_url : `str`
        URL of a GitHub repository.

    Returns
    -------
    repo_slug : `RepoSlug`
        Repository slug with fields ``full``, ``owner``, and ``repo``.
        See `RepoSlug` for details.

    Raises
    ------
    RuntimeError
        Raised if the URL cannot be parsed.
    """
    match = GITHUB_SLUG_PATTERN.match(github_url)
    if not match:
        message = 'Could not parse GitHub slug from {}'.format(github_url)
        raise RuntimeError(message)

    _full = '/'.join((match.group('org'),
                      match.group('name')))
    return RepoSlug(_full, match.group('org'), match.group('name'))
def make_raw_content_url(repo_slug, git_ref, file_path):
    """Make a raw content (raw.githubusercontent.com) URL to a file.

    Parameters
    ----------
    repo_slug : `str` or `RepoSlug`
        The repository slug, formatted as either a `str` (``'owner/name'``)
        or a `RepoSlug` object (created by `parse_repo_slug_from_url`).
    git_ref : `str`
        The git ref: a branch name, commit hash, or tag name.
    file_path : `str`
        The POSIX path of the file in the repository tree.
    """
    if isinstance(repo_slug, RepoSlug):
        slug_str = repo_slug.full
    else:
        slug_str = repo_slug

    if file_path.startswith('/'):
        file_path = file_path.lstrip('/')

    template = 'https://raw.githubusercontent.com/{slug}/{git_ref}/{path}'
    return template.format(
        slug=slug_str,
        git_ref=git_ref,
        path=file_path)
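For example, a string slug and a leading-slash path resolve as follows (output derived directly from the template above):

make_raw_content_url('lsst/lsst-texmf', 'master',
                     '/texmf/bibtex/bib/lsst.bib')
# -> 'https://raw.githubusercontent.com/lsst/lsst-texmf/master/'
#    'texmf/bibtex/bib/lsst.bib'  (the leading '/' is stripped)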
def tz(self):
    """Return the timezone. If none is set use system timezone"""
    if not self._tz:
        self._tz = tzlocal.get_localzone().zone
    return self._tz
def add_tag(self, _tags):
    """Add tag(s) to a DayOneEntry"""
    if isinstance(_tags, list):
        for t in _tags:
            self.tags.append(t)
    else:
        self.tags.append(_tags)
def time(self, t):
    """Convert any timestamp into a datetime and save as _time"""
    _time = arrow.get(t).format('YYYY-MM-DDTHH:mm:ss')
    self._time = datetime.datetime.strptime(_time, '%Y-%m-%dT%H:%M:%S')
def as_dict(self):
    """Return a dict that represents the DayOneEntry"""
    entry_dict = {}
    entry_dict['UUID'] = self.uuid
    entry_dict['Creation Date'] = self.time
    entry_dict['Time Zone'] = self.tz
    if self.tags:
        entry_dict['Tags'] = self.tags
    entry_dict['Entry Text'] = self.text
    entry_dict['Starred'] = self.starred
    entry_dict['Location'] = self.location
    return entry_dict
def save(self, entry, with_location=True, debug=False):
    """Saves a DayOneEntry as a plist"""
    entry_dict = {}
    if isinstance(entry, DayOneEntry):
        # Get a dict of the DayOneEntry
        entry_dict = entry.as_dict()
    else:
        entry_dict = entry

    # Set the UUID (ported from Python 2: uuid4().get_hex() is now .hex)
    entry_dict['UUID'] = uuid.uuid4().hex

    if with_location and not entry_dict['Location']:
        entry_dict['Location'] = self.get_location()

    # Do we have everything needed?
    if not all((entry_dict['UUID'], entry_dict['Time Zone'],
                entry_dict['Entry Text'])):
        print("You must provide: Time zone, UUID, Creation Date, "
              "Entry Text")
        return False

    if debug is False:
        file_path = self._file_path(entry_dict['UUID'])
        # plistlib.writePlist was removed in Python 3.9; use dump instead
        with open(file_path, 'wb') as f:
            plistlib.dump(entry_dict, f)
    else:
        print(plistlib.dumps(entry_dict).decode('utf-8'))
    return True
def _file_path(self, uid):
    """Create and return full file path for DayOne entry"""
    file_name = '%s.doentry' % (uid)
    return os.path.join(self.dayone_journal_path, file_name)
def combine(self, members, output_file, dimension=None, start_index=None,
            stop_index=None, stride=None):
    """Combine many files into a single file on disk. Defaults to using
    the 'time' dimension."""
    nco = None
    try:
        nco = Nco()
    except BaseException:
        # This is not necessarily an import error (could be wrong PATH)
        raise ImportError("NCO not found. The NCO python bindings are "
                          "required to use 'Collection.combine'.")

    if len(members) > 0 and hasattr(members[0], 'path'):
        # A member DotDict was passed in, we only need the paths
        members = [m.path for m in members]

    options = ['-4']        # NetCDF4
    options += ['-L', '3']  # Level 3 compression
    options += ['-h']       # Don't append to the history global attribute
    if dimension is not None:
        if start_index is None:
            start_index = 0
        if stop_index is None:
            stop_index = ''
        if stride is None:
            stride = 1
        options += ['-d', '{0},{1},{2},{3}'.format(dimension, start_index,
                                                   stop_index, stride)]
    nco.ncrcat(input=members, output=output_file, options=options)
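A hypothetical call (instance and file names invented) that concatenates two files along the time dimension, keeping every second record:

collection.combine(                   # `collection` is a hypothetical
    members=['day1.nc', 'day2.nc'],   # Collection instance
    output_file='combined.nc',
    dimension='time',
    stride=2,
)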
def main(argv=None, white_list=None, load_yaz_extension=True):
    """The entry point for a yaz script

    This will almost always be called from a python script in the
    following manner:

        if __name__ == "__main__":
            yaz.main()

    This function will perform the following steps:

    1. It will load any additional python code from the yaz_extension
       python module located in the ~/.yaz directory when
       LOAD_YAZ_EXTENSION is True and the yaz_extension module exists

    2. It collects all yaz tasks and plugins. When WHITE_LIST is a
       non-empty list, only the tasks and plugins located therein will
       be considered

    3. It will parse arguments from ARGV, or the command line when ARGV
       is not given, resulting in a yaz task or a parser help message.

    4. When a suitable task is found, this task is executed. In case of
       a task which is part of a plugin, i.e. class, then this plugin is
       initialized, possibly resulting in other plugins also being
       initialized if they are marked as `@yaz.dependency`.
    """
    assert argv is None or isinstance(argv, list), type(argv)
    assert white_list is None or isinstance(white_list, list), type(white_list)
    assert isinstance(load_yaz_extension, bool), type(load_yaz_extension)

    argv = sys.argv if argv is None else argv
    assert len(argv) > 0, len(argv)

    if load_yaz_extension:
        load("~/.yaz", "yaz_extension")

    parser = Parser(prog=argv[0])
    parser.add_task_tree(get_task_tree(white_list))

    task, kwargs = parser.parse_arguments(argv)
    if task:
        try:
            result = task(**kwargs)

            # when the result is a boolean, exit with 0 (success) or 1 (failure)
            if isinstance(result, bool):
                code = 0 if result else 1
                output = None
            # when the result is an integer, exit with that integer value
            elif isinstance(result, int):
                code = result % 256
                output = None
            # otherwise exit with 0 (success) and print the result
            else:
                code = 0
                output = result

        # when yaz.Error occurs, exit with the given return code and print
        # the error message; when any other error occurs, let python handle
        # the exception (i.e. exit(1) and print call stack)
        except Error as error:
            code = error.return_code
            output = error
    else:
        # when no task is found to execute, exit with 1 (failure) and
        # print the help text
        code = 1
        output = parser.format_help().rstrip()

    if output is not None:
        print(output)

    sys.exit(code)
def get_task_tree(white_list=None):
    """Returns a tree of Task instances

    The tree is comprised of dictionaries containing strings for keys
    and either dictionaries or Task instances for values.

    When WHITE_LIST is given, only the tasks and plugins in this list
    will become part of the task tree. The WHITE_LIST may contain either
    strings, corresponding to the task or plugin __qualname__, or,
    preferably, links to the task function or plugin class instead.
    """
    assert white_list is None or isinstance(white_list, list), type(white_list)

    if white_list is not None:
        white_list = set(item if isinstance(item, str) else item.__qualname__
                         for item in white_list)

    tree = dict((task.qualified_name, task)
                for task in _task_list.values()
                if white_list is None or task.qualified_name in white_list)

    plugins = get_plugin_list()
    for plugin in [plugin for plugin in plugins.values()
                   if white_list is None or plugin.__qualname__ in white_list]:
        tasks = [func
                 for _, func in inspect.getmembers(plugin)
                 if inspect.isfunction(func)
                 and hasattr(func, "yaz_task_config")]
        if len(tasks) == 0:
            continue

        node = tree
        for name in plugin.__qualname__.split("."):
            if name not in node:
                node[name] = {}
            node = node[name]

        for func in tasks:
            logger.debug("Found task %s", func)
            node[func.__name__] = Task(plugin_class=plugin, func=func,
                                       config=func.yaz_task_config)

    return tree
def task(func, **config):
    """Declare a function or method to be a Yaz task

        @yaz.task
        def talk(message: str = "Hello World!"):
            return message

    Or... group multiple tasks together

        class Tools(yaz.Plugin):
            @yaz.task
            def say(self, message: str = "Hello World!"):
                return message

            @yaz.task(option__choices=["A", "B", "C"])
            def choose(self, option: str = "A"):
                return option
    """
    if func.__name__ == func.__qualname__:
        assert func.__qualname__ not in _task_list, \
            "Can not define the same task \"{}\" twice".format(func.__qualname__)
        logger.debug("Found task %s", func)
        _task_list[func.__qualname__] = Task(plugin_class=None, func=func,
                                             config=config)
    else:
        func.yaz_task_config = config
    return func
def get_parameters(self):
    """Yields the parameters of this task"""
    if self.plugin_class is None:
        sig = inspect.signature(self.func)
        for index, parameter in enumerate(sig.parameters.values()):
            if parameter.kind not in [parameter.POSITIONAL_ONLY,
                                      parameter.KEYWORD_ONLY,
                                      parameter.POSITIONAL_OR_KEYWORD]:
                raise RuntimeError(
                    "Task {} contains an unsupported {} parameter".format(
                        parameter, parameter.kind))
            yield parameter
    else:
        var_keyword_seen = set()
        for cls in inspect.getmro(self.plugin_class):
            if issubclass(cls, BasePlugin) and hasattr(cls, self.func.__name__):
                func = getattr(cls, self.func.__name__)
                logger.debug("Found method %s from class %s", func, cls)
                var_keyword_found = False
                sig = inspect.signature(func)
                for index, parameter in enumerate(sig.parameters.values()):
                    if index == 0:
                        # skip "self" parameter
                        continue
                    if parameter.kind == inspect.Parameter.VAR_KEYWORD:
                        # found "**kwargs" parameter. we will continue to
                        # the next class in the mro to add any keyword
                        # parameters we have not yet used (i.e. whose name
                        # we have not yet seen)
                        var_keyword_found = True
                        continue
                    if parameter.kind in [parameter.POSITIONAL_ONLY,
                                          parameter.VAR_POSITIONAL]:
                        raise RuntimeError(
                            "Task {} contains an unsupported parameter "
                            "\"{}\"".format(func, parameter))
                    if parameter.name not in var_keyword_seen:
                        var_keyword_seen.add(parameter.name)
                        logger.debug("Found parameter %s (%s)",
                                     parameter, parameter.kind)
                        yield parameter

                # we only need to look at the next class in the mro
                # when "**kwargs" is found
                if not var_keyword_found:
                    break
def get_configuration(self, key, default=None):
    """Returns the configuration for KEY"""
    if key in self.config:
        return self.config.get(key)
    else:
        return default
def get_plugin_list():
    """Finds all yaz plugins and returns them in a
    __qualname__: plugin_class dictionary"""
    global _yaz_plugin_classes

    def get_recursively(cls, plugin_list):
        for plugin in cls.__subclasses__():
            if not (plugin.yaz_is_final()
                    or plugin.__qualname__ in _yaz_plugin_classes):
                plugin_list[plugin.__qualname__].append(plugin)
            get_recursively(plugin, plugin_list)
        return plugin_list

    def include_class(candidate, classes):
        for cls in classes:
            if candidate is cls:
                continue
            if issubclass(cls, candidate):
                return False
        return True

    def get_plugin_type(qualname, plugins):
        classes = sorted(plugins, key=lambda plugin: plugin.yaz_get_ordinal())
        # exclude classes that are implicitly included as parent classes
        classes = [cls for cls in classes if include_class(cls, classes)]
        logger.debug("New plugin class \"%s\" extending %s",
                     qualname, [cls for cls in classes])
        return type(qualname, tuple(classes) + (Final,), {})

    logger.debug("Plugin list: %s", _yaz_plugin_classes)

    # find all Plugin classes recursively
    plugin_list = get_recursively(BasePlugin, collections.defaultdict(list))

    # combine all classes into their Plugin class (i.e. multiple
    # inherited plugin)
    _yaz_plugin_classes.update(
        (qualname, get_plugin_type(qualname, plugins))
        for qualname, plugins in plugin_list.items())

    assert isinstance(_yaz_plugin_classes, dict), type(_yaz_plugin_classes)
    assert all(isinstance(qualname, str)
               for qualname in _yaz_plugin_classes.keys()), \
        "Every key should be a string"
    assert all(issubclass(plugin_class, Final)
               for plugin_class in _yaz_plugin_classes.values()), \
        "Every value should be a 'Final' plugin"
    return _yaz_plugin_classes
def get_plugin_instance(plugin_class, *args, **kwargs):
    """Returns an instance of a fully initialized plugin class

    Every plugin class is kept in a plugin cache, effectively making
    every plugin into a singleton object.

    When a plugin has a yaz.dependency decorator, it will be called as
    well, before the instance is returned.
    """
    assert issubclass(plugin_class, BasePlugin), type(plugin_class)

    global _yaz_plugin_instance_cache

    qualname = plugin_class.__qualname__
    if qualname not in _yaz_plugin_instance_cache:
        plugin_class = get_plugin_list()[qualname]
        _yaz_plugin_instance_cache[qualname] = plugin = plugin_class(*args, **kwargs)

        # find any yaz.dependency decorators, and call them when necessary
        funcs = [func
                 for _, func in inspect.getmembers(plugin)
                 if inspect.ismethod(func)
                 and hasattr(func, "yaz_dependency_config")]

        for func in funcs:
            signature = inspect.signature(func)
            assert all(parameter.kind is parameter.POSITIONAL_OR_KEYWORD
                       and issubclass(parameter.annotation, BasePlugin)
                       for parameter in signature.parameters.values()), \
                "All parameters for {} must type hint to a BasePlugin".format(func)
            func(*[get_plugin_instance(parameter.annotation)
                   for parameter in signature.parameters.values()])

    return _yaz_plugin_instance_cache[qualname]
def xml_to_json(root):
    """Convert an Open511 XML document or document fragment to JSON.

    Takes an lxml Element object. Returns a dict ready to be
    JSON-serialized."""
    j = {}

    if len(root) == 0:  # Tag with no children, return str/int
        return _maybe_intify(root.text)

    if len(root) == 1 and root[0].tag.startswith('{' + NS_GML):  # GML
        return gml_to_geojson(root[0])

    if root.tag == 'open511':
        j['meta'] = {'version': root.get('version')}

    for elem in root:
        name = elem.tag
        if name == 'link' and elem.get('rel'):
            name = elem.get('rel') + '_url'
            if name == 'self_url':
                name = 'url'
            if root.tag == 'open511':
                j['meta'][name] = elem.get('href')
                continue
        elif name.startswith('{' + NS_PROTECTED):
            name = '!' + name[name.index('}') + 1:]
        elif name[0] == '{':
            # Namespace!
            name = '+' + name[name.index('}') + 1:]

        if name in j:
            continue  # duplicate
        elif elem.tag == 'link' and not elem.text:
            j[name] = elem.get('href')
        elif len(elem):
            if name == 'grouped_events':
                # An array of URLs
                j[name] = [xml_link_to_json(child, to_dict=False)
                           for child in elem]
            elif name in ('attachments', 'media_files'):
                # An array of JSON objects
                j[name] = [xml_link_to_json(child, to_dict=True)
                           for child in elem]
            elif all((name == pluralize(child.tag) for child in elem)):
                # <something><somethings> serializes to a JSON array
                j[name] = [xml_to_json(child) for child in elem]
            else:
                j[name] = xml_to_json(elem)
        else:
            if root.tag == 'open511' and name.endswith('s') and not elem.text:
                # Special case: an empty e.g. <events /> container at the
                # root level should be serialized to [], not null
                j[name] = []
            else:
                j[name] = _maybe_intify(elem.text)

    return j
def gml_to_geojson(el):
    """Given an lxml Element of a GML geometry, returns a dict in
    GeoJSON format."""
    if el.get('srsName') not in ('urn:ogc:def:crs:EPSG::4326', None):
        if el.get('srsName') == 'EPSG:4326':
            return _gmlv2_to_geojson(el)
        else:
            raise NotImplementedError("Unrecognized srsName %s"
                                      % el.get('srsName'))
    tag = el.tag.replace('{%s}' % NS_GML, '')
    if tag == 'Point':
        coordinates = _reverse_gml_coords(el.findtext('{%s}pos' % NS_GML))[0]
    elif tag == 'LineString':
        coordinates = _reverse_gml_coords(el.findtext('{%s}posList' % NS_GML))
    elif tag == 'Polygon':
        coordinates = []
        for ring in el.xpath('gml:exterior/gml:LinearRing/gml:posList',
                             namespaces=NSMAP) \
                + el.xpath('gml:interior/gml:LinearRing/gml:posList',
                           namespaces=NSMAP):
            coordinates.append(_reverse_gml_coords(ring.text))
    elif tag in ('MultiPoint', 'MultiLineString', 'MultiPolygon'):
        single_type = tag[5:]
        member_tag = single_type[0].lower() + single_type[1:] + 'Member'
        coordinates = [
            gml_to_geojson(member)['coordinates']
            for member in el.xpath('gml:%s/gml:%s' % (member_tag, single_type),
                                   namespaces=NSMAP)
        ]
    else:
        raise NotImplementedError

    return {
        'type': tag,
        'coordinates': coordinates
    }
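A sketch of the Point case, assuming NS_GML is the standard http://www.opengis.net/gml namespace; GML orders coordinates lat lon while GeoJSON is lon lat, which is what the _reverse_gml_coords call handles:

from lxml import etree

el = etree.fromstring(
    '<gml:Point xmlns:gml="http://www.opengis.net/gml">'
    '<gml:pos>45.50 -73.55</gml:pos>'
    '</gml:Point>')
gml_to_geojson(el)
# -> {'type': 'Point', 'coordinates': [-73.55, 45.5]}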
def _gmlv2_to_geojson(el):
    """Translates a deprecated GML 2.0 geometry to GeoJSON"""
    tag = el.tag.replace('{%s}' % NS_GML, '')
    if tag == 'Point':
        coordinates = [float(c) for c in
                       el.findtext('{%s}coordinates' % NS_GML).split(',')]
    elif tag == 'LineString':
        coordinates = [
            [float(x) for x in pair.split(',')]
            for pair in el.findtext('{%s}coordinates' % NS_GML).split(' ')
        ]
    elif tag == 'Polygon':
        coordinates = []
        for ring in el.xpath('gml:outerBoundaryIs/gml:LinearRing/gml:coordinates',
                             namespaces=NSMAP) \
                + el.xpath('gml:innerBoundaryIs/gml:LinearRing/gml:coordinates',
                           namespaces=NSMAP):
            coordinates.append([
                [float(x) for x in pair.split(',')]
                for pair in ring.text.split(' ')
            ])
    elif tag in ('MultiPoint', 'MultiLineString', 'MultiPolygon',
                 'MultiCurve'):
        if tag == 'MultiCurve':
            single_type = 'LineString'
            member_tag = 'curveMember'
        else:
            single_type = tag[5:]
            member_tag = single_type[0].lower() + single_type[1:] + 'Member'
        coordinates = [
            gml_to_geojson(member)['coordinates']
            for member in el.xpath('gml:%s/gml:%s' % (member_tag, single_type),
                                   namespaces=NSMAP)
        ]
    else:
        raise NotImplementedError

    return {
        'type': tag,
        'coordinates': coordinates
    }
def deparagraph(element, doc):
    """Panflute filter function that converts content wrapped in a Para
    to Plain.

    Use this filter with pandoc as::

        pandoc [..] --filter=lsstprojectmeta-deparagraph

    Only lone paragraphs are affected. Para elements with siblings (like
    a second Para) are left unaffected.

    This filter is useful for processing strings like titles or author
    names so that the output isn't wrapped in paragraph tags. For
    example, without this filter, pandoc converts a string
    ``"The title"`` to ``<p>The title</p>`` in HTML. These ``<p>`` tags
    aren't useful if you intend to put the title text in ``<h1>`` tags
    using your own templating system.
    """
    if isinstance(element, Para):
        # Check if siblings exist; don't process the paragraph in that case.
        if element.next is not None:
            return element
        elif element.prev is not None:
            return element

        # Remove the Para wrapper from the lone paragraph.
        # `Plain` is a container that isn't rendered as a paragraph.
        return Plain(*element.content)
def all_subclasses(cls):
    """
    Recursively generate all the subclasses of class cls.
    """
    for subclass in cls.__subclasses__():
        yield subclass
        for subc in all_subclasses(subclass):
            yield subc
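For example, grandchildren are reached through the recursion:

class A: pass
class B(A): pass
class C(B): pass

list(all_subclasses(A))   # [B, C]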
def unique_justseen(iterable, key=None):
    "List unique elements, preserving order. Remember only the element just seen."
    # unique_justseen('AAAABBBCCDAABBB') --> A B C D A B
    # unique_justseen('ABBCcAD', str.lower) --> A B C A D
    try:
        # PY2 support
        from itertools import imap as map
    except ImportError:
        from builtins import map

    return map(next, map(operator.itemgetter(1),
                         itertools.groupby(iterable, key)))
def normalize_array(var):
    """
    Returns a normalized data array from a NetCDF4 variable. This is
    mostly used to normalize string types between py2 and py3. It has
    no effect on types other than chars/strings.
    """
    if np.issubdtype(var.dtype, 'S1'):
        if var.dtype == str:
            # Python 2 on netCDF4 'string' variables needs this.
            # Python 3 returns false for np.issubdtype(var.dtype, 'S1')
            return var[:]

        def decoder(x):
            return str(x.decode('utf-8'))
        vfunc = np.vectorize(decoder)
        return vfunc(nc4.chartostring(var[:]))
    else:
        return var[:]
def generic_masked(arr, attrs=None, minv=None, maxv=None, mask_nan=True):
    """
    Returns a masked array with anything outside of values masked.
    The minv and maxv parameters take precedence over any dict values.
    The valid_range attribute takes precedence over the valid_min and
    valid_max attributes.
    """
    attrs = attrs or {}

    if 'valid_min' in attrs:
        minv = safe_attribute_typing(arr.dtype, attrs['valid_min'])
    if 'valid_max' in attrs:
        maxv = safe_attribute_typing(arr.dtype, attrs['valid_max'])
    if 'valid_range' in attrs:
        vr = attrs['valid_range']
        minv = safe_attribute_typing(arr.dtype, vr[0])
        maxv = safe_attribute_typing(arr.dtype, vr[1])

    # Get the min/max of values that the hardware supports
    try:
        info = np.iinfo(arr.dtype)
    except ValueError:
        info = np.finfo(arr.dtype)

    minv = minv if minv is not None else info.min
    maxv = maxv if maxv is not None else info.max

    if mask_nan is True:
        arr = np.ma.fix_invalid(arr)

    return np.ma.masked_outside(
        arr,
        minv,
        maxv
    )
def dictify_urn(urn, combine_interval=True):
    """
    By default, this will put the `interval` as part of the
    `cell_methods` attribute (NetCDF CF style). To return `interval` as
    its own key, use the `combine_interval=False` parameter.
    """
    ioos_urn = IoosUrn.from_string(urn)

    if ioos_urn.valid() is False:
        return dict()

    if ioos_urn.asset_type != 'sensor':
        logger.error("This function only works on 'sensor' URNs.")
        return dict()

    if '#' in ioos_urn.component:
        standard_name, extras = ioos_urn.component.split('#')
    else:
        standard_name = ioos_urn.component
        extras = ''

    d = dict(standard_name=standard_name)

    # Discriminant
    if '-' in ioos_urn.component:
        d['discriminant'] = standard_name.split('-')[-1]
        d['standard_name'] = standard_name.split('-')[0]

    intervals = []
    cell_methods = []
    if extras:
        for section in extras.split(';'):
            key, values = section.split('=')
            if key == 'interval':
                # special case, intervals should be appended to the
                # cell_methods
                for v in values.split(','):
                    intervals.append(v)
            else:
                if key == 'cell_methods':
                    value = [x.replace('_', ' ').replace(':', ': ')
                             for x in values.split(',')]
                    cell_methods = value
                else:
                    value = ' '.join([x.replace('_', ' ').replace(':', ': ')
                                      for x in values.split(',')])
                    d[key] = value

    if combine_interval is True:
        if cell_methods and intervals:
            if len(cell_methods) == len(intervals):
                d['cell_methods'] = ' '.join([
                    '{} (interval: {})'.format(x[0], x[1].upper())
                    for x in zip(cell_methods, intervals)])
            else:
                d['cell_methods'] = ' '.join(cell_methods)
                for i in intervals:
                    d['cell_methods'] += ' (interval: {})'.format(i.upper())
        elif cell_methods:
            d['cell_methods'] = ' '.join(cell_methods)
            for i in intervals:
                d['cell_methods'] += ' (interval: {})'.format(i.upper())
        elif intervals:
            raise ValueError("An interval without a cell_method is not "
                             "allowed! Not possible!")
    else:
        d['cell_methods'] = ' '.join(cell_methods)
        d['interval'] = ','.join(intervals).upper()

    if 'vertical_datum' in d:
        d['vertical_datum'] = d['vertical_datum'].upper()

    return d
def default(self, obj):
    """If input object is an ndarray it will be converted into a list"""
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    elif isinstance(obj, np.generic):
        return np.asscalar(obj)
    # Let the base class default method raise the TypeError
    # (the original called json.JSONEncoder(self, obj), which constructs
    # an encoder instead of delegating)
    return json.JSONEncoder.default(self, obj)
def default(self, obj):
    """If input object is an ndarray it will be converted into a dict
    holding dtype, shape and the data, base64 encoded.
    """
    if isinstance(obj, np.ndarray):
        if obj.flags['C_CONTIGUOUS']:
            obj_data = obj.data
        else:
            cont_obj = np.ascontiguousarray(obj)
            assert cont_obj.flags['C_CONTIGUOUS']
            obj_data = cont_obj.data
        # decode to str so the resulting dict is JSON-serializable on
        # Python 3 (b64encode returns bytes)
        data_b64 = base64.b64encode(obj_data).decode('ascii')
        return dict(__ndarray__=data_b64,
                    dtype=str(obj.dtype),
                    shape=obj.shape)
    elif isinstance(obj, np.generic):
        return np.asscalar(obj)
    # Let the base class default method raise the TypeError
    return json.JSONEncoder.default(self, obj)
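A sketch of wiring a `default` method like the ones above into json.dumps; the subclass name NumpyEncoder is hypothetical, and this simplified variant uses the list conversion rather than the base64 dict:

import json
import numpy as np

class NumpyEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, np.generic):
            return obj.item()   # modern spelling of np.asscalar
        return json.JSONEncoder.default(self, obj)

json.dumps({'a': np.arange(3)}, cls=NumpyEncoder)   # '{"a": [0, 1, 2]}'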
def mapfivo(ol, *args, **kwargs):
    '''
    # mapfivo: each element is decided by its own (func, index, value,
    # other_args) 4-tuple; this is the 4-tuple engine
    # map_func: diff_func(index, value, *diff_args)
    '''
    args = list(args)
    lngth = args.__len__()
    if lngth == 0:
        diff_funcs_arr = kwargs['map_funcs']
        diff_args_arr = kwargs['map_func_args_array']
    elif lngth == 1:
        if 'map_func_args_array' in kwargs:
            diff_funcs_arr = args[0]
            diff_args_arr = kwargs['map_func_args_array']
        else:
            diff_funcs_arr = kwargs['map_funcs']
            diff_args_arr = args[0]
    else:
        diff_funcs_arr = args[0]
        diff_args_arr = args[1]
    lngth = ol.__len__()
    rslt = []
    for i in range(0, lngth):
        index = i
        value = ol[i]
        func = diff_funcs_arr[i]
        args = diff_args_arr[i]
        ele = func(index, value, *args)
        rslt.append(ele)
    return(rslt)
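Worked example: each position gets its own function and its own extra arguments, verifiable against the loop above:

ol = [1, 2, 3]
funcs = [
    lambda i, v, inc: v + inc,
    lambda i, v, factor: v * factor,
    lambda i, v, suffix: '{}:{}{}'.format(i, v, suffix),
]
args_arr = [[10], [5], ['!']]
mapfivo(ol, map_funcs=funcs, map_func_args_array=args_arr)
# -> [11, 10, '2:3!']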
def mapfiv(ol, map_func_args, **kwargs):
    '''
    # mapfiv: every element shares the same other_args (o)
    # map_func: diff_func(index, value, *common_args)
    '''
    lngth = ol.__len__()
    diff_funcs_arr = kwargs['map_funcs']
    common_args_arr = init(lngth, map_func_args)
    rslt = mapfivo(ol, map_funcs=diff_funcs_arr,
                   map_func_args_array=common_args_arr)
    return(rslt)
def mapivo(ol, map_func, **kwargs):
    '''
    # mapivo: every element shares the same map_func (f)
    # map_func: common_func(index, value, *diff_args)
    '''
    lngth = ol.__len__()
    common_funcs_arr = init(lngth, map_func)
    diff_args_arr = kwargs['map_func_args_array']
    rslt = mapfivo(ol, map_funcs=common_funcs_arr,
                   map_func_args_array=diff_args_arr)
    return(rslt)
def array_dualmap(ol, value_map_func, **kwargs):
    '''
    from elist.elist import *
    ol = ['a','b','c','d']
    def index_map_func(index,prefix,suffix):
        s = prefix + str(index+97) + suffix
        return(s)
    def value_map_func(mapped_index,ele,prefix,suffix):
        s = prefix + mapped_index + ': ' + str(ele) + suffix
        return(s)
    ####
    rslt = array_dualmap(ol,index_map_func=index_map_func,index_map_func_args=[': ',' is '],value_map_func=value_map_func,value_map_func_args=['ord',' yes?'])
    pobj(rslt)
    '''
    def get_self(obj):
        return(obj)

    if 'index_map_func_args' in kwargs:
        index_map_func_args = kwargs['index_map_func_args']
    else:
        index_map_func_args = []
    if 'value_map_func_args' in kwargs:
        value_map_func_args = kwargs['value_map_func_args']
    else:
        value_map_func_args = []
    if 'index_map_func' in kwargs:
        index_map_func = kwargs['index_map_func']
    else:
        index_map_func = get_self
    length = ol.__len__()
    il = list(range(0, length))
    nil = list(map(lambda ele: index_map_func(ele, *index_map_func_args), il))
    nvl = []
    for i in range(0, length):
        ele = ol[i]
        v = value_map_func(nil[i], ele, *value_map_func_args)
        nvl.append(v)
    return(nvl)
def array_dualmap2(*refls,**kwargs):
    '''
        from elist.elist import *
        ol = [1,2,3,4]
        refl1 = ['+','+','+','+']
        refl2 = [7,7,7,7]
        refl3 = ['=','=','=','=']
        def index_map_func(index):
            s ="<"+str(index)+">"
            return(s)

        def value_map_func(mapped_index,ele,ref_ele1,ref_ele2,ref_ele3,prefix,suffix):
            s = prefix+mapped_index+': ' + str(ele) + str(ref_ele1) + str(ref_ele2) + str(ref_ele3) + suffix
            return(s)

        ####
        rslt = array_dualmap2(ol,refl1,refl2,refl3,index_map_func=index_map_func,value_map_func=value_map_func,value_map_func_args=['Q','?'])
        pobj(rslt)
    '''
    def get_self(obj,*args):
        return(obj)
    value_map_func = kwargs['value_map_func']
    if('value_map_func_args' in kwargs):
        value_map_func_args = kwargs['value_map_func_args']
    else:
        value_map_func_args = []
    if('index_map_func' in kwargs):
        index_map_func = kwargs['index_map_func']
    else:
        index_map_func = get_self
    if('index_map_func_args' in kwargs):
        index_map_func_args = kwargs['index_map_func_args']
    else:
        index_map_func_args = []
    refls = list(refls)
    # the first positional list (ol in the docstring example) drives the
    # length; `ol` was previously referenced here without being defined
    length = refls[0].__len__()
    il = list(range(0,length))
    nil = list(map(lambda ele:index_map_func(ele,*index_map_func_args),il))
    refls = prepend(refls,nil)
    nvl = array_map2(*refls,map_func = value_map_func,map_func_args=value_map_func_args)
    return(nvl)
def mapfi(ol,map_func_args,**kwargs):
    '''
        #mapfi        share common other_args, NOT take value as a param for map_func
        #map_func     diff_func(index,*common_args)
    '''
    diff_funcs_arr = kwargs['map_funcs']
    lngth = ol.__len__()
    rslt = []
    for i in range(0,lngth):
        func = diff_funcs_arr[i]
        ele = func(i,*map_func_args)
        rslt.append(ele)
    return(rslt)
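# Usage sketch for mapfi: per-element functions over the index only; the
# list values are ignored and the extra args are shared.
ol = ['a','b']
funcs = [lambda i,base: base + i, lambda i,base: base - i]
mapfi(ol, [10], map_funcs=funcs)
# [10, 9]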
def mapfv(ol,map_func_args,*args,**kwargs):
    '''
        #mapfv        share common other_args, NOT take index as a param for map_func
        #map_func     diff_func(value,*common_args)
    '''
    args = list(args)
    lngth = args.__len__()
    if(lngth == 0):
        diff_funcs_arr = kwargs['map_funcs']
    else:
        diff_funcs_arr = args[0]
    lngth = ol.__len__()
    rslt = []
    for i in range(0,lngth):
        value = ol[i]
        func = diff_funcs_arr[i]
        ele = func(value,*map_func_args)
        rslt.append(ele)
    return(rslt)
def mapfo(ol,**kwargs):
    '''
        #mapfo        NOT take value as a param for map_func, NOT take index as a param for map_func
        #map_func     diff_func(*diff_args)
    '''
    diff_args_arr = kwargs['map_func_args_array']
    diff_funcs_arr = kwargs['map_funcs']
    lngth = ol.__len__()
    rslt = []
    for i in range(0,lngth):
        func = diff_funcs_arr[i]
        func_args = diff_args_arr[i]
        # per the contract above, neither index nor value is passed to func;
        # the previous func(value,*args) contradicted the docstring
        ele = func(*func_args)
        rslt.append(ele)
    return(rslt)
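# Usage sketch for mapfo (with the func(*diff_args) contract above): one
# function and one args list per slot; ol only determines how many calls run.
ol = [None, None]
funcs = [lambda x,y: x + y, lambda x,y: x * y]
mapfo(ol, map_funcs=funcs, map_func_args_array=[[2,3],[2,3]])
# [5, 6]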
def mapiv2(ol,map_func,*args,**kwargs):
    '''
        from elist.elist import *
        ol = ['a','b','c','d']
        #1
        def map_func(index,value,*others):
            return(value * index + others[0] +others[-1])

        mapiv2(ol,map_func,'tailA-','tailB')
        #2
        mapiv2(ol,lambda index,value,other:(value*index+other),map_func_args=['-'])
        mapiv2(ol,lambda index,value,other:(value*index+other),'-')
        mapiv2(ol,lambda index,value:(value*index))
    '''
    args = list(args)
    if(args.__len__() > 0):
        map_func_args = args
    else:
        if('map_func_args' in kwargs):
            map_func_args = kwargs['map_func_args']
        else:
            map_func_args = []
    lngth = ol.__len__()
    rslt = []
    for i in range(0,lngth):
        ele = map_func(i,ol[i],*map_func_args)
        rslt.append(ele)
    return(rslt)
def mapvo(ol,map_func,*args,**kwargs):
    '''
        #mapvo        share common map_func, NOT take index as a param for map_func
        #             common_func(value,*priv_args)
    '''
    lngth = ol.__len__()
    args = list(args)
    if(args.__len__()==0):
        diff_args_arr = kwargs['map_func_args_array']
    else:
        diff_args_arr = args[0]
    rslt = []
    for i in range(0,lngth):
        value = ol[i]
        func_args = diff_args_arr[i]
        ele = map_func(value,*func_args)
        rslt.append(ele)
    return(rslt)
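# Usage sketch for mapvo: a shared map_func over values with per-element
# extra args.
ol = [1,2,3]
mapvo(ol, lambda v,mul: v * mul, map_func_args_array=[[10],[100],[1000]])
# [10, 200, 3000]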
def array_map2(*referls,**kwargs):
    '''
        obsolete, kept only for backward compatibility
        from elist.elist import *
        ol = [1,2,3,4]
        refl1 = ['+','+','+','+']
        refl2 = [7,7,7,7]
        refl3 = ['=','=','=','=']
        def map_func(ele,ref_ele1,ref_ele2,ref_ele3,prefix,suffix):
            s = prefix+': ' + str(ele) + str(ref_ele1) + str(ref_ele2) + str(ref_ele3) + suffix
            return(s)

        ####
        rslt = array_map2(ol,refl1,refl2,refl3,map_func=map_func,map_func_args=['Q','?'])
        pobj(rslt)
    '''
    map_func = kwargs['map_func']
    if('map_func_args' in kwargs):
        map_func_args = kwargs['map_func_args']
    else:
        map_func_args = []
    length = referls.__len__()
    rslt = []
    anum = list(referls)[0].__len__()
    for j in range(0,anum):
        args = []
        for i in range(0,length):
            refl = referls[i]
            args.append(refl[j])
        args.extend(map_func_args)
        v = map_func(*args)
        rslt.append(v)
    return(rslt)
def mapi(ol,map_func,map_func_args=[]):
    '''
        #mapi     NOT take value as a param for map_func,
        #         share common map_func, share common other_args
        #         common_func(index,*common_args)
    '''
    lngth = ol.__len__()
    rslt = []
    for i in range(0,lngth):
        ele = map_func(i,*map_func_args)
        rslt.append(ele)
    return(rslt)
def mapv(ol,map_func,map_func_args=[]):
    '''
        #mapv     NOT take index as a param for map_func,
        #         share common map_func, share common other_args
        #         common_func(value,*common_args)
    '''
    rslt = list(map(lambda ele:map_func(ele,*map_func_args),ol))
    return(rslt)
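# Usage sketch for mapi/mapv: shared func, shared extra args; mapi feeds the
# index, mapv feeds the value.
ol = ['a','b','c']
mapi(ol, lambda i,offset: i + offset, [100])   # [100, 101, 102]
mapv(ol, lambda v,suffix: v + suffix, ['!'])   # ['a!', 'b!', 'c!']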
def array_map(ol,map_func,*args):
    '''
        obsolete, kept only for backward compatibility
        from elist.elist import *
        ol = [1,2,3,4]
        def map_func(ele,mul,plus):
            return(ele*mul+plus)

        array_map(ol,map_func,2,100)
    '''
    rslt = list(map(lambda ele:map_func(ele,*args),ol))
    return(rslt)
def mapo(ol,map_func,*params,**kwargs):
    '''
        #mapo     NOT take index as a param for map_func,
        #         NOT take value as a param for map_func,
        #         share common map_func
        #         common_func(*priv_args)
    '''
    params = list(params)
    if(params.__len__()==0):
        diff_args_arr = kwargs['map_func_args_array']
    elif(isinstance(params[0],list)):
        diff_args_arr = params[0]
    else:
        diff_args_arr = params
    lngth = ol.__len__()
    rslt = []
    for i in range(0,lngth):
        func_args = diff_args_arr[i]
        ele = map_func(*func_args)
        rslt.append(ele)
    return(rslt)
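# Usage sketch for mapo: per-slot args only; neither index nor value reaches
# map_func, and ol just fixes the number of calls.
ol = [None, None, None]
mapo(ol, lambda x: x ** 2, [[2],[3],[4]])
# [4, 9, 16]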
def findfivo(ol,*args,**kwargs):
    '''
        #findfivo      determined by all four of f,i,v,o    fivo-4-tuple-engine
        #cond_func     diff_func(index,value,*diff_args)
    '''
    args = list(args)
    lngth = args.__len__()
    if(lngth==0):
        diff_funcs_arr = kwargs['cond_funcs']
        diff_args_arr = kwargs['cond_func_args_array']
    elif(lngth==1):
        if('cond_func_args_array' in kwargs):
            diff_funcs_arr = args[0]
            diff_args_arr = kwargs['cond_func_args_array']
        else:
            diff_funcs_arr = kwargs['cond_funcs']
            diff_args_arr = args[0]
    else:
        diff_funcs_arr = args[0]
        diff_args_arr = args[1]
    lngth = ol.__len__()
    rslt = []
    for i in range(0,lngth):
        index = i
        value = ol[i]
        func = diff_funcs_arr[i]
        func_args = diff_args_arr[i]
        cond = func(index,value,*func_args)
        if(cond):
            rslt.append((index,value))
        else:
            pass
    return(rslt)
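# Usage sketch for findfivo: per-element predicates and args; returns the
# (index, value) pairs whose predicate holds.
ol = [10, 20, 30]
conds = [
    lambda i,v,t: v > t,
    lambda i,v,t: v > t,
    lambda i,v,t: i % 2 == 0 and v > t,
]
findfivo(ol, cond_funcs=conds, cond_func_args_array=[[15],[15],[15]])
# [(1, 20), (2, 30)]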
def findfiv(ol,cond_func_args,**kwargs):
    '''
        #findfiv       share common other_args
        #cond_func     diff_func(index,value,*common_args)
    '''
    lngth = ol.__len__()
    diff_funcs_arr = kwargs['cond_funcs']
    # was init(lngth,map_func_args): map_func_args is undefined in this
    # function; the shared args are passed in as cond_func_args
    common_args_arr = init(lngth,cond_func_args)
    rslt = findfivo(ol,cond_funcs=diff_funcs_arr,cond_func_args_array=common_args_arr)
    return(rslt)
def findv(ol,cond_func,cond_func_args=[]):
    '''
        #findv    NOT take index as a param for cond_func,
        #         share common cond_func, share common other_args
        #         common_func(value,*common_args)
    '''
    rslt = []
    for i in range(ol.__len__()):
        cond = cond_func(ol[i],*cond_func_args)
        if(cond):
            rslt.append((i,ol[i]))
        else:
            pass
    return(rslt)
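# Usage sketch for findv: one shared predicate over values.
ol = [1, 5, 2, 8]
findv(ol, lambda v,x: v > x, [3])
# [(1, 5), (3, 8)]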
def cond_select_indexes_all(ol,**kwargs): ''' from elist.elist import * from elist.jprint import pobj def test_func(ele,x): cond = (ele > x) return(cond) ol = [1,2,3,4,5,6,7] rslt = cond_select_indexes_all(ol,cond_func = test_func, cond_func_args = [3]) pobj(rslt) ''' cond_func = kwargs['cond_func'] if('cond_func_args' in kwargs): cond_func_args = kwargs['cond_func_args'] else: cond_func_args = [] #### founded = find_all(ol,cond_func,*cond_func_args) rslt = array_map(founded,lambda ele:ele['index']) return(rslt)
def cond_select_indexes_all2(ol,**kwargs): ''' from elist.elist import * from xdict.jprint import pobj def test_func(ele,index,x): cond1 = (ele > x) cond2 = (index %2 == 0) cond =(cond1 & cond2) return(cond) ol = [1,2,3,4,5,6,7] rslt = cond_select_indexes_all2(ol,cond_func = test_func,cond_func_args = [3]) pobj(rslt) ''' cond_func = kwargs['cond_func'] if('cond_func_args' in kwargs): cond_func_args = kwargs['cond_func_args'] else: cond_func_args = [] #### founded = find_all2(ol,cond_func,*cond_func_args) rslt = array_map(founded,lambda ele:ele['index']) return(rslt)
def select_seqs(ol,seqs):
    '''
        from elist.elist import *
        ol = ['a','b','c','d']
        select_seqs(ol,[1,2])
    '''
    # handle the empty and single-index cases before calling itemgetter:
    # itemgetter() with no arguments raises a TypeError, and with a single
    # argument it returns a bare element rather than a tuple
    if(seqs.__len__()==0):
        rslt = []
    elif(seqs.__len__()==1):
        rslt = [ol[seqs[0]]]
    else:
        rslt = list(itemgetter(*seqs)(ol))
    return(rslt)
def append(ol,ele,**kwargs): ''' from elist.elist import * ol = [1,2,3,4] ele = 5 id(ol) append(ol,ele,mode="original") ol id(ol) #### ol = [1,2,3,4] ele = 5 id(ol) new = append(ol,ele) new id(new) ''' if('mode' in kwargs): mode = kwargs["mode"] else: mode = "new" if(mode == "new"): new = copy.deepcopy(ol) new.append(ele) return(new) else: ol.append(ele) return(ol)
def append_some(ol,*eles,**kwargs): ''' from elist.elist import * ol = [1,2,3,4] id(ol) append_some(ol,5,6,7,8,mode="original") ol id(ol) #### ol = [1,2,3,4] id(ol) new = append_some(ol,5,6,7,8) new id(new) ''' if('mode' in kwargs): mode = kwargs["mode"] else: mode = "new" return(extend(ol,list(eles),mode=mode))
def prepend(ol,ele,**kwargs):
    '''
        from elist.elist import *
        ol = [1,2,3,4]
        ele = 5
        id(ol)
        prepend(ol,ele,mode="original")
        ol
        id(ol)
        ####
        ol = [1,2,3,4]
        ele = 5
        id(ol)
        new = prepend(ol,ele)
        new
        id(new)
    '''
    if('mode' in kwargs):
        mode = kwargs["mode"]
    else:
        mode = "new"
    if(mode == "new"):
        new = [ele]
        cpol = copy.deepcopy(ol)
        new.extend(cpol)
        return(new)
    else:
        # in-place: list.insert does the element shifting for us
        ol.insert(0,ele)
        return(ol)
def prepend_some(ol,*eles,**kwargs): ''' from elist.elist import * ol = [1,2,3,4] id(ol) prepend_some(ol,5,6,7,8,mode="original") ol id(ol) #### ol = [1,2,3,4] id(ol) new = prepend_some(ol,5,6,7,8) new id(new) #####unshift is the same as prepend_some >>> unshift(ol,9,10,11,12) [9, 10, 11, 12, 1, 2, 3, 4] ''' if('mode' in kwargs): mode = kwargs["mode"] else: mode = "new" return(prextend(ol,list(eles),mode=mode))
def extend(ol,nl,**kwargs): ''' from elist.elist import * ol = [1,2,3,4] nl = [5,6,7,8] id(ol) extend(ol,nl,mode="original") ol id(ol) #### ol = [1,2,3,4] nl = [5,6,7,8] id(ol) new = extend(ol,nl) new id(new) ''' if('mode' in kwargs): mode = kwargs["mode"] else: mode = "new" if(mode == "new"): new = copy.deepcopy(ol) cpnl = copy.deepcopy(nl) new.extend(cpnl) return(new) else: ol.extend(nl) return(ol)
def push(ol,*eles,**kwargs): ''' from elist.elist import * ol=[1,2,3,4] id(ol) new = push(ol,5,6,7) new id(new) #### ol=[1,2,3,4] id(ol) rslt = push(ol,5,6,7,mode="original") rslt id(rslt) ''' if('mode' in kwargs): mode = kwargs['mode'] else: mode = "new" eles = list(eles) return(extend(ol,eles,mode=mode))
def prextend(ol,nl,**kwargs):
    '''
        from elist.elist import *
        ol = [1,2,3,4]
        nl = [5,6,7,8]
        id(ol)
        id(nl)
        prextend(ol,nl,mode="original")
        ol
        id(ol)
        ####
        ol = [1,2,3,4]
        nl = [5,6,7,8]
        id(ol)
        id(nl)
        new = prextend(ol,nl)
        new
        id(new)
    '''
    if('mode' in kwargs):
        mode = kwargs["mode"]
    else:
        mode = "new"
    if(mode == "new"):
        new = copy.deepcopy(nl)
        cpol = copy.deepcopy(ol)
        new.extend(cpol)
        return(new)
    else:
        # in-place: splice nl in front of the existing elements
        ol[0:0] = nl
        return(ol)
def concat(*arrays):
    '''
        from elist.elist import *
        l1 = [1,2,3]
        l2 = ["a","b","c"]
        l3 = [100,200]
        id(l1)
        id(l2)
        id(l3)
        arrays = [l1,l2,l3]
        new = concat(*arrays)
        new
        id(new)
    '''
    new = []
    length = arrays.__len__()
    for i in range(0,length):
        array = copy.deepcopy(arrays[i])
        new.extend(array)
    return(new)
def cdr(ol,**kwargs): ''' from elist.elist import * ol=[1,2,3,4] id(ol) new = cdr(ol) new id(new) #### ol=[1,2,3,4] id(ol) rslt = cdr(ol,mode="original") rslt id(rslt) ''' if('mode' in kwargs): mode = kwargs['mode'] else: mode = "new" if(mode == "new"): cpol = copy.deepcopy(ol) return(cpol[1:]) else: ol.pop(0) return(ol)
def cons(head_ele,l,**kwargs): ''' from elist.elist import * ol=[1,2,3,4] id(ol) new = cons(5,ol) new id(new) #### ol=[1,2,3,4] id(ol) rslt = cons(5,ol,mode="original") rslt id(rslt) ''' if('mode' in kwargs): mode = kwargs['mode'] else: mode = "new" return(prepend(l,head_ele,mode=mode))
def uniform_index(index,length): ''' uniform_index(0,3) uniform_index(-1,3) uniform_index(-4,3) uniform_index(-3,3) uniform_index(5,3) ''' if(index<0): rl = length+index if(rl<0): index = 0 else: index = rl elif(index>=length): index = length else: index = index return(index)
def insert(ol,start_index,ele,**kwargs):
    '''
        from elist.elist import *
        ol = [1,2,3,4]
        ele = 5
        id(ol)
        insert(ol,2,ele,mode="original")
        ol
        id(ol)
        ####
        ol = [1,2,3,4]
        ele = 5
        id(ol)
        new = insert(ol,2,ele)
        new
        id(new)
    '''
    if('mode' in kwargs):
        mode = kwargs["mode"]
    else:
        mode = "new"
    if(mode == "new"):
        length = ol.__len__()
        cpol = copy.deepcopy(ol)
        si = uniform_index(start_index,length)
        # cpol is already a private deep copy, so slicing it is enough
        new = cpol[0:si]
        new.append(ele)
        new.extend(cpol[si:])
        return(new)
    else:
        ol.insert(start_index,ele)
        return(ol)