def preserve(method=None, result=True, fields=None):
    """Preserve fields in deposit.

    :param method: Function to execute. (Default: ``None``)
    :param result: If ``True`` returns the result of the method execution,
        otherwise ``self``. (Default: ``True``)
    :param fields: List of fields to preserve (default: ``('_deposit',)``).
    """
    if method is None:
        return partial(preserve, result=result, fields=fields)
    fields = fields or ('_deposit', )

    @wraps(method)
    def wrapper(self, *args, **kwargs):
        """Check current deposit status."""
        data = {field: self[field] for field in fields if field in self}
        result_ = method(self, *args, **kwargs)
        replace = result_ if result else self
        for field in data:
            replace[field] = data[field]
        return result_
    return wrapper
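A minimal usage sketch of the decorator above; ``MyDeposit`` and its ``clear`` override are hypothetical, the decorator is the one just shown:

class MyDeposit(dict):

    @preserve(fields=('_deposit', '$schema'))
    def clear(self):
        # dict.clear() wipes everything; the decorator restores the
        # preserved fields on the returned object afterwards.
        super(MyDeposit, self).clear()
        return self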
def pid(self):
    """Return an instance of deposit PID."""
    pid = self.deposit_fetcher(self.id, self)
    return PersistentIdentifier.get(pid.pid_type, pid.pid_value)
def record_schema(self):
    """Convert deposit schema to a valid record schema."""
    schema_path = current_jsonschemas.url_to_path(self['$schema'])
    schema_prefix = current_app.config['DEPOSIT_JSONSCHEMAS_PREFIX']
    if schema_path and schema_path.startswith(schema_prefix):
        return current_jsonschemas.path_to_url(
            schema_path[len(schema_prefix):]
        )
def build_deposit_schema(self, record):
    """Convert record schema to a valid deposit schema.

    :param record: The record used to build deposit schema.
    :returns: The absolute URL to the schema or ``None``.
    """
    schema_path = current_jsonschemas.url_to_path(record['$schema'])
    schema_prefix = current_app.config['DEPOSIT_JSONSCHEMAS_PREFIX']
    if schema_path:
        return current_jsonschemas.path_to_url(
            schema_prefix + schema_path
        )
def fetch_published(self):
    """Return a tuple with PID and published record."""
    pid_type = self['_deposit']['pid']['type']
    pid_value = self['_deposit']['pid']['value']

    resolver = Resolver(
        pid_type=pid_type, object_type='rec',
        getter=partial(self.published_record_class.get_record,
                       with_deleted=True)
    )
    return resolver.resolve(pid_value)
def merge_with_published(self):
    """Merge changes with latest published version."""
    pid, first = self.fetch_published()
    lca = first.revisions[self['_deposit']['pid']['revision_id']]
    # ignore _deposit and $schema fields
    args = [lca.dumps(), first.dumps(), self.dumps()]
    for arg in args:
        del arg['$schema'], arg['_deposit']
    args.append({})
    m = Merger(*args)
    try:
        m.run()
    except UnresolvedConflictsException:
        raise MergeConflict()
    return patch(m.unified_patches, lca)
def commit(self, *args, **kwargs):
    """Store changes on current instance in database and index it."""
    return super(Deposit, self).commit(*args, **kwargs)
def create(cls, data, id_=None):
    """Create a deposit.

    Initialize the following information inside the deposit:

    .. code-block:: python

        deposit['_deposit'] = {
            'id': pid_value,
            'status': 'draft',
            'owners': [user_id],
            'created_by': user_id,
        }

    The deposit index is updated.

    :param data: Input dictionary to fill the deposit.
    :param id_: Default uuid for the deposit.
    :returns: The newly created deposit.
    """
    data.setdefault('$schema', current_jsonschemas.path_to_url(
        current_app.config['DEPOSIT_DEFAULT_JSONSCHEMA']
    ))
    if '_deposit' not in data:
        id_ = id_ or uuid.uuid4()
        cls.deposit_minter(id_, data)

    data['_deposit'].setdefault('owners', list())
    if current_user and current_user.is_authenticated:
        creator_id = int(current_user.get_id())

        if creator_id not in data['_deposit']['owners']:
            data['_deposit']['owners'].append(creator_id)

        data['_deposit']['created_by'] = creator_id

    return super(Deposit, cls).create(data, id_=id_)
@contextmanager
def _process_files(self, record_id, data):
    """Snapshot bucket and add files in record during first publishing.

    Used as a context manager (see ``_publish_new`` below), hence the
    ``contextlib.contextmanager`` decorator.
    """
    if self.files:
        assert not self.files.bucket.locked
        self.files.bucket.locked = True

        snapshot = self.files.bucket.snapshot(lock=True)
        data['_files'] = self.files.dumps(bucket=snapshot.id)
        yield data
        db.session.add(RecordsBuckets(
            record_id=record_id, bucket_id=snapshot.id
        ))
    else:
        yield data
def _publish_new(self, id_=None):
    """Publish new deposit.

    :param id_: The forced record UUID.
    """
    minter = current_pidstore.minters[
        current_app.config['DEPOSIT_PID_MINTER']
    ]
    id_ = id_ or uuid.uuid4()
    record_pid = minter(id_, self)

    self['_deposit']['pid'] = {
        'type': record_pid.pid_type,
        'value': record_pid.pid_value,
        'revision_id': 0,
    }

    data = dict(self.dumps())
    data['$schema'] = self.record_schema

    with self._process_files(id_, data):
        record = self.published_record_class.create(data, id_=id_)

    return record
def _publish_edited(self):
    """Publish the deposit after it has been edited."""
    record_pid, record = self.fetch_published()
    if record.revision_id == self['_deposit']['pid']['revision_id']:
        data = dict(self.dumps())
    else:
        data = self.merge_with_published()

    data['$schema'] = self.record_schema
    data['_deposit'] = self['_deposit']
    record = record.__class__(data, model=record.model)
    return record
def publish(self, pid=None, id_=None):
    """Publish a deposit.

    If it's the first time:

    * it calls the minter and sets the following meta information inside
      the deposit:

    .. code-block:: python

        deposit['_deposit']['pid'] = {
            'type': pid_type,
            'value': pid_value,
            'revision_id': 0,
        }

    * A dump of all information inside the deposit is done.

    * A snapshot of the files is done.

    Otherwise, it publishes the new edited version. In this case, if
    someone already published a newer version in the meantime, it will
    try to merge the changes with the latest version.

    .. note:: no need for indexing as it calls ``self.commit()``.

    Status required: ``'draft'``.

    :param pid: Force the new pid value. (Default: ``None``)
    :param id_: Force the new uuid value as deposit id. (Default: ``None``)
    :returns: Returns itself.
    """
    pid = pid or self.pid

    if not pid.is_registered():
        raise PIDInvalidAction()

    self['_deposit']['status'] = 'published'

    if self['_deposit'].get('pid') is None:  # First publishing
        self._publish_new(id_=id_)
    else:  # Update after edit
        record = self._publish_edited()
        record.commit()

    self.commit()
    return self
def _prepare_edit(self, record):
    """Update selected keys.

    :param record: The record to prepare.
    """
    data = record.dumps()
    # Keep current record revision for merging.
    data['_deposit']['pid']['revision_id'] = record.revision_id
    data['_deposit']['status'] = 'draft'
    data['$schema'] = self.build_deposit_schema(record)
    return data
def edit(self, pid=None):
    """Edit deposit.

    #. The signal :data:`invenio_records.signals.before_record_update`
       is sent before the edit execution.

    #. The following meta information is saved inside the deposit:

    .. code-block:: python

        deposit['_deposit']['pid']['revision_id'] = record.revision_id
        deposit['_deposit']['status'] = 'draft'
        deposit['$schema'] = deposit_schema_from_record_schema

    #. The signal :data:`invenio_records.signals.after_record_update` is
       sent after the edit execution.

    #. The deposit index is updated.

    Status required: ``'published'``.

    .. note:: the process fails unless the pid has status
        :attr:`invenio_pidstore.models.PIDStatus.REGISTERED`.

    :param pid: Force a pid object. (Default: ``None``)
    :returns: A new Deposit object.
    """
    pid = pid or self.pid

    with db.session.begin_nested():
        before_record_update.send(
            current_app._get_current_object(), record=self)

        record_pid, record = self.fetch_published()
        assert PIDStatus.REGISTERED == record_pid.status
        assert record['_deposit'] == self['_deposit']

        self.model.json = self._prepare_edit(record)

        flag_modified(self.model, 'json')
        db.session.merge(self.model)

    after_record_update.send(
        current_app._get_current_object(), record=self)
    return self.__class__(self.model.json, model=self.model)
def discard(self, pid=None):
    """Discard deposit changes.

    #. The signal :data:`invenio_records.signals.before_record_update` is
       sent before the edit execution.

    #. It restores the last published version.

    #. The following meta information is saved inside the deposit:

    .. code-block:: python

        deposit['$schema'] = deposit_schema_from_record_schema

    #. The signal :data:`invenio_records.signals.after_record_update` is
       sent after the edit execution.

    #. The deposit index is updated.

    Status required: ``'draft'``.

    :param pid: Force a pid object. (Default: ``None``)
    :returns: A new Deposit object.
    """
    pid = pid or self.pid

    with db.session.begin_nested():
        before_record_update.send(
            current_app._get_current_object(), record=self)

        _, record = self.fetch_published()
        self.model.json = deepcopy(record.model.json)
        self.model.json['$schema'] = self.build_deposit_schema(record)

        flag_modified(self.model, 'json')
        db.session.merge(self.model)

    after_record_update.send(
        current_app._get_current_object(), record=self)
    return self.__class__(self.model.json, model=self.model)
def delete(self, force=True, pid=None):
    """Delete deposit.

    Status required: ``'draft'``.

    :param force: Force deposit delete. (Default: ``True``)
    :param pid: Force pid object. (Default: ``None``)
    :returns: A new Deposit object.
    """
    pid = pid or self.pid

    if self['_deposit'].get('pid'):
        raise PIDInvalidAction()
    if pid:
        pid.delete()
    return super(Deposit, self).delete(force=force)
def clear(self, *args, **kwargs):
    """Clear only drafts.

    Status required: ``'draft'``.

    Meta information inside ``_deposit`` is preserved.
    """
    super(Deposit, self).clear(*args, **kwargs)

def update(self, *args, **kwargs):
    """Update only drafts.

    Status required: ``'draft'``.

    Meta information inside ``_deposit`` is preserved.
    """
    super(Deposit, self).update(*args, **kwargs)

def patch(self, *args, **kwargs):
    """Patch only drafts.

    Status required: ``'draft'``.

    Meta information inside ``_deposit`` is preserved.
    """
    return super(Deposit, self).patch(*args, **kwargs)
def files(self):
    """List of Files inside the deposit.

    Add validation on the ``sort_by`` method: if, at the time of files
    access, the record is not a ``'draft'``, then a
    :exc:`invenio_pidstore.errors.PIDInvalidAction` is raised.
    """
    files_ = super(Deposit, self).files

    if files_:
        sort_by_ = files_.sort_by

        def sort_by(*args, **kwargs):
            """Only in draft state."""
            if 'draft' != self.status:
                raise PIDInvalidAction()
            return sort_by_(*args, **kwargs)

        files_.sort_by = sort_by

    return files_
def rst2node(doc_name, data):
    """Convert a reStructuredText string into its docutils node."""
    if not data:
        return
    parser = docutils.parsers.rst.Parser()
    document = docutils.utils.new_document('<%s>' % doc_name)
    document.settings = docutils.frontend.OptionParser().get_default_values()
    document.settings.tab_width = 4
    document.settings.pep_references = False
    document.settings.rfc_references = False
    document.settings.env = Env()
    parser.parse(data, document)
    if len(document.children) == 1:
        return document.children[0]
    else:
        par = docutils.nodes.paragraph()
        for child in document.children:
            par += child
        return par
def setup(app):
    """Hook the directives in when Sphinx asks for it."""
    if 'http' not in app.domains:
        httpdomain.setup(app)
    app.add_directive('autopyramid', RouteDirective)
def _parse_response(self, response):
    """Parse the API response and raise the appropriate error if
    raise_errors was set to True.
    """
    if not self._raise_errors:
        return response

    is_4xx_error = 400 <= response.status_code < 500
    is_5xx_error = 500 <= response.status_code < 600
    content = response.content

    if response.status_code == 403:
        raise AuthenticationError(content)
    elif is_4xx_error:
        raise APIError(content)
    elif is_5xx_error:
        raise ServerError(content)

    return response
def _api_request(self, endpoint, http_method, *args, **kwargs):
    """Private method for API requests."""
    logger.debug(' > Sending API request to endpoint: %s' % endpoint)

    auth = self._build_http_auth()
    headers = self._build_request_headers(kwargs.get('headers'))
    logger.debug('\theaders: %s' % headers)

    path = self._build_request_path(endpoint)
    logger.debug('\tpath: %s' % path)

    data = self._build_payload(kwargs.get('payload'))
    if not data:
        data = kwargs.get('data')
    logger.debug('\tdata: %s' % data)

    req_kw = dict(
        auth=auth,
        headers=headers,
        timeout=kwargs.get('timeout', self.DEFAULT_TIMEOUT)
    )

    # Dispatch on the HTTP method.
    if http_method == self.HTTP_POST:
        if data:
            r = requests.post(path, data=data, **req_kw)
        else:
            r = requests.post(path, **req_kw)
    elif http_method == self.HTTP_PUT:
        if data:
            r = requests.put(path, data=data, **req_kw)
        else:
            r = requests.put(path, **req_kw)
    elif http_method == self.HTTP_DELETE:
        r = requests.delete(path, **req_kw)
    else:
        r = requests.get(path, **req_kw)

    logger.debug('\tresponse code: %s' % r.status_code)
    try:
        logger.debug('\tresponse: %s' % r.json())
    except ValueError:  # body is not JSON; a bare `except:` would hide bugs
        logger.debug('\tresponse: %s' % r.content)

    return self._parse_response(r)
def get_log(self, log_id, timeout=None):
    """API call to get a specific log entry."""
    return self._api_request(
        self.GET_LOG_ENDPOINT % log_id,
        self.HTTP_GET,
        timeout=timeout
    )

def get_log_events(self, log_id, timeout=None):
    """API call to get the events of a specific log entry."""
    return self._api_request(
        self.GET_LOG_EVENTS_ENDPOINT % log_id,
        self.HTTP_GET,
        timeout=timeout
    )

def templates(self, timeout=None):
    """API call to get a list of templates."""
    return self._api_request(
        self.TEMPLATES_ENDPOINT,
        self.HTTP_GET,
        timeout=timeout
    )

def get_template(self, template_id, version=None, timeout=None):
    """API call to get a specific template."""
    if version:
        return self._api_request(
            self.TEMPLATES_VERSION_ENDPOINT % (template_id, version),
            self.HTTP_GET,
            timeout=timeout
        )
    else:
        return self._api_request(
            self.TEMPLATES_SPECIFIC_ENDPOINT % template_id,
            self.HTTP_GET,
            timeout=timeout
        )
def create_email(self, name, subject, html, text=''):
    """[DEPRECATED] API call to create an email."""
    return self.create_template(name, subject, html, text)
def create_template(self, name, subject, html, text='', timeout=None):
    """API call to create a template."""
    payload = {
        'name': name,
        'subject': subject,
        'html': html,
        'text': text
    }

    return self._api_request(
        self.TEMPLATES_ENDPOINT,
        self.HTTP_POST,
        payload=payload,
        timeout=timeout
    )
def create_new_locale(self, template_id, locale, version_name,
                      subject, text='', html='', timeout=None):
    """API call to create a new locale and version of a template."""
    payload = {
        'locale': locale,
        'name': version_name,
        'subject': subject
    }
    if html:
        payload['html'] = html
    if text:
        payload['text'] = text

    return self._api_request(
        self.TEMPLATES_LOCALES_ENDPOINT % template_id,
        self.HTTP_POST,
        payload=payload,
        timeout=timeout
    )
def create_new_version(self, name, subject, text='', template_id=None,
                       html=None, locale=None, timeout=None):
    """API call to create a new version of a template."""
    if html:
        payload = {
            'name': name,
            'subject': subject,
            'html': html,
            'text': text
        }
    else:
        payload = {
            'name': name,
            'subject': subject,
            'text': text
        }

    if locale:
        url = self.TEMPLATES_SPECIFIC_LOCALE_VERSIONS_ENDPOINT % (
            template_id,
            locale
        )
    else:
        url = self.TEMPLATES_NEW_VERSION_ENDPOINT % template_id

    return self._api_request(
        url,
        self.HTTP_POST,
        payload=payload,
        timeout=timeout
    )
def update_template_version(self, name, subject, template_id, version_id,
                            text='', html=None, timeout=None):
    """API call to update a template version."""
    if html:
        payload = {
            'name': name,
            'subject': subject,
            'html': html,
            'text': text
        }
    else:
        payload = {
            'name': name,
            'subject': subject,
            'text': text
        }

    return self._api_request(
        self.TEMPLATES_VERSION_ENDPOINT % (template_id, version_id),
        self.HTTP_PUT,
        payload=payload,
        timeout=timeout
    )
def snippets(self, timeout=None):
    """API call to get the list of snippets."""
    return self._api_request(
        self.SNIPPETS_ENDPOINT,
        self.HTTP_GET,
        timeout=timeout
    )

def get_snippet(self, snippet_id, timeout=None):
    """API call to get a specific snippet."""
    return self._api_request(
        self.SNIPPET_ENDPOINT % snippet_id,
        self.HTTP_GET,
        timeout=timeout
    )

def create_snippet(self, name, body, timeout=None):
    """API call to create a snippet."""
    payload = {
        'name': name,
        'body': body
    }
    return self._api_request(
        self.SNIPPETS_ENDPOINT,
        self.HTTP_POST,
        payload=payload,
        timeout=timeout
    )
def _make_file_dict(self, f):
    """Make a dictionary with filename and base64 file data."""
    if isinstance(f, dict):
        file_obj = f['file']
        if 'filename' in f:
            file_name = f['filename']
        else:
            file_name = file_obj.name
    else:
        file_obj = f
        file_name = f.name

    b64_data = base64.b64encode(file_obj.read())
    return {
        'id': file_name,
        'data': b64_data.decode() if six.PY3 else b64_data,
    }
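A brief usage sketch of the helper above; the file name and the ``client`` instance are hypothetical:

# Hypothetical usage showing both accepted shapes:
with open('report.pdf', 'rb') as f:
    as_is = client._make_file_dict(f)  # 'id' becomes 'report.pdf'

with open('report.pdf', 'rb') as f:
    renamed = client._make_file_dict({'file': f, 'filename': 'invoice.pdf'})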
def send(self, email_id, recipient, email_data=None, sender=None,
         cc=None, bcc=None, tags=None, headers=None, esp_account=None,
         locale=None, email_version_name=None, inline=None, files=None,
         timeout=None):
    """API call to send an email."""
    # Avoid mutable default arguments ([], {}); default to None and
    # normalise here instead.
    if not email_data:
        email_data = {}
    tags = tags or []
    headers = headers or {}
    files = files or []

    # for backwards compatibility, will be removed
    if isinstance(recipient, string_types):
        warnings.warn(
            "Passing email directly for recipient is deprecated",
            DeprecationWarning)
        recipient = {'address': recipient}

    payload = {
        'email_id': email_id,
        'recipient': recipient,
        'email_data': email_data
    }

    if sender:
        payload['sender'] = sender
    if cc:
        if not isinstance(cc, list):
            logger.error('kwarg cc must be type(list), got %s' % type(cc))
        payload['cc'] = cc
    if bcc:
        if not isinstance(bcc, list):
            logger.error('kwarg bcc must be type(list), got %s' % type(bcc))
        payload['bcc'] = bcc
    if tags:
        if not isinstance(tags, list):
            logger.error('kwarg tags must be type(list), got %s' % type(tags))
        payload['tags'] = tags
    if headers:
        if not isinstance(headers, dict):
            logger.error(
                'kwarg headers must be type(dict), got %s' % type(headers))
        payload['headers'] = headers
    if esp_account:
        if not isinstance(esp_account, string_types):
            logger.error(
                'kwarg esp_account must be a string, got %s' %
                type(esp_account))
        payload['esp_account'] = esp_account
    if locale:
        if not isinstance(locale, string_types):
            logger.error(
                'kwarg locale must be a string, got %s' % type(locale))
        payload['locale'] = locale
    if email_version_name:
        if not isinstance(email_version_name, string_types):
            logger.error(
                'kwarg email_version_name must be a string, got %s' %
                type(email_version_name))
        payload['version_name'] = email_version_name

    if inline:
        payload['inline'] = self._make_file_dict(inline)
    if files:
        payload['files'] = [self._make_file_dict(f) for f in files]

    return self._api_request(
        self.SEND_ENDPOINT,
        self.HTTP_POST,
        payload=payload,
        timeout=timeout
    )
def _api_request(self, endpoint, http_method, *args, **kwargs):
    """Private method for queuing batch API requests."""
    logger.debug(' > Queuing batch API request for endpoint: %s' % endpoint)

    path = self._build_request_path(endpoint, absolute=False)
    logger.debug('\tpath: %s' % path)

    data = None
    if 'payload' in kwargs:
        data = kwargs['payload']
    logger.debug('\tdata: %s' % data)

    command = {
        "path": path,
        "method": http_method
    }
    if data:
        command['body'] = data

    self._commands.append(command)
def execute(self, timeout=None):
    """Execute all currently queued batch commands."""
    logger.debug(' > Batch API request (length %s)' % len(self._commands))

    auth = self._build_http_auth()
    headers = self._build_request_headers()
    logger.debug('\tbatch headers: %s' % headers)
    logger.debug('\tbatch command length: %s' % len(self._commands))

    path = self._build_request_path(self.BATCH_ENDPOINT)
    data = json.dumps(self._commands, cls=self._json_encoder)

    r = requests.post(
        path,
        auth=auth,
        headers=headers,
        data=data,
        timeout=(self.DEFAULT_TIMEOUT if timeout is None else timeout)
    )

    self._commands = []
    logger.debug('\tresponse code: %s' % r.status_code)
    try:
        logger.debug('\tresponse: %s' % r.json())
    except ValueError:  # body is not JSON
        logger.debug('\tresponse: %s' % r.content)

    return r
def get_group_tabs(self):
    """Return instances of all other tabs that are members of the
    tab's tab group.
    """
    if self.tab_group is None:
        raise ImproperlyConfigured(
            "%s requires a definition of 'tab_group'" %
            self.__class__.__name__)
    group_members = [t for t in self._registry
                     if t.tab_group == self.tab_group]
    return [t() for t in group_members]
def _process_tabs(self, tabs, current_tab, group_current_tab):
    """Process and prepare tabs.

    This includes steps like updating references to the current tab,
    filtering out hidden tabs, sorting tabs etc...

    Args:
        tabs:
            The list of tabs to process.
        current_tab:
            The reference to the currently loaded tab.
        group_current_tab:
            The reference to the active tab in the current tab group.
            For parent tabs, this is different than for the current
            tab group.

    Returns:
        Processed list of tabs. Note that the method may have side
        effects.

    """
    # Update references to the current tab
    for t in tabs:
        t.current_tab = current_tab
        t.group_current_tab = group_current_tab

    # Filter out hidden tabs
    tabs = list(filter(lambda t: t.tab_visible, tabs))

    # Sort remaining tabs in-place
    tabs.sort(key=lambda t: t.weight)

    return tabs
def get_context_data(self, **kwargs):
    """Adds tab information to context.

    To retrieve a list of all group tab instances, use
    ``{{ tabs }}`` in your template.

    The id of the current tab is added as ``current_tab_id`` to the
    template context.

    If the current tab has a parent tab the parent's id is added to
    the template context as ``parent_tab_id``. Instances of all tabs
    of the parent level are added as ``parent_tabs`` to the context.

    If the current tab has children they are added to the template
    context as ``child_tabs``.

    """
    context = super(TabView, self).get_context_data(**kwargs)

    # Update the context with kwargs, TemplateView doesn't do this.
    context.update(kwargs)

    # Add tabs and "current" references to context
    process_tabs_kwargs = {
        'tabs': self.get_group_tabs(),
        'current_tab': self,
        'group_current_tab': self,
    }
    context['tabs'] = self._process_tabs(**process_tabs_kwargs)
    context['current_tab_id'] = self.tab_id

    # Handle parent tabs
    if self.tab_parent is not None:
        # Verify that tab parent is valid
        if self.tab_parent not in self._registry:
            msg = '%s has no attribute _is_tab' % \
                  self.tab_parent.__class__.__name__
            raise ImproperlyConfigured(msg)

        # Get parent tab instance
        parent = self.tab_parent()

        # Add parent tabs to context
        process_parents_kwargs = {
            'tabs': parent.get_group_tabs(),
            'current_tab': self,
            'group_current_tab': parent,
        }
        context['parent_tabs'] = self._process_tabs(**process_parents_kwargs)
        context['parent_tab_id'] = parent.tab_id

    # Handle child tabs
    if self.tab_id in self._children:
        process_children_kwargs = {
            'tabs': [t() for t in self._children[self.tab_id]],
            'current_tab': self,
            'group_current_tab': None,
        }
        context['child_tabs'] = self._process_tabs(**process_children_kwargs)

    return context
def normalize_name(s):
    """Convert a string into a valid python attribute name.

    This function is called to convert ASCII strings to something that can
    pass as python attribute name, to be used with namedtuples.

    >>> str(normalize_name('class'))
    'class_'
    >>> str(normalize_name('a-name'))
    'a_name'
    >>> str(normalize_name('a n\u00e4me'))
    'a_name'
    >>> str(normalize_name('Name'))
    'Name'
    >>> str(normalize_name(''))
    '_'
    >>> str(normalize_name('1'))
    '_1'
    """
    s = s.replace('-', '_').replace('.', '_').replace(' ', '_')
    if s in keyword.kwlist:
        return s + '_'
    s = '_'.join(slug(ss, lowercase=False) for ss in s.split('_'))
    if not s:
        s = '_'
    if s[0] not in string.ascii_letters + '_':
        s = '_' + s
    return s
def schema(tg):
    """
    Convert the table and column descriptions of a `TableGroup` into
    specifications for the DB schema.

    :param tg: The `TableGroup` to convert.
    :return: A list of `TableSpec` objects, ordered so that each table is \
    created after all tables it references.
    """
    tables = {}
    for tname, table in tg.tabledict.items():
        t = TableSpec.from_table_metadata(table)
        tables[t.name] = t
        for at in t.many_to_many.values():
            tables[at.name] = at

    # We must determine the order in which tables must be created!
    ordered = OrderedDict()
    i = 0

    # We loop through the tables repeatedly, and whenever we find one, which
    # has all referenced tables already in ordered, we move it from tables
    # to ordered.
    while tables and i < 100:
        i += 1
        for table in list(tables.keys()):
            if all((ref[1] in ordered) or ref[1] == table
                   for ref in tables[table].foreign_keys):
                # All referenced tables are already created
                # (or self-referential).
                ordered[table] = tables.pop(table)
                break
    if tables:  # pragma: no cover
        raise ValueError(
            'there seem to be cyclic dependencies between the tables')

    return list(ordered.values())
def write(self, _force=False, _exists_ok=False, **items):
    """
    Creates a db file with the core schema.

    :param _force: If `True` an existing db file will be overwritten.
    """
    if self.fname and self.fname.exists() and not _force:
        raise ValueError(
            'db file already exists, use _force=True to overwrite')
    with self.connection() as db:
        for table in self.tables:
            db.execute(table.sql(translate=self.translate))

        db.execute('PRAGMA foreign_keys = ON;')
        db.commit()

        refs = defaultdict(list)  # collects rows in association tables.
        for t in self.tables:
            if t.name not in items:
                continue
            rows, keys = [], []
            cols = {c.name: c for c in t.columns}
            for i, row in enumerate(items[t.name]):
                pk = row[t.primary_key[0]] \
                    if t.primary_key and len(t.primary_key) == 1 else None
                values = []
                for k, v in row.items():
                    if k in t.many_to_many:
                        assert pk
                        at = t.many_to_many[k]
                        atkey = tuple(
                            [at.name] + [c.name for c in at.columns])
                        for vv in v:
                            fkey, context = self.association_table_context(
                                t, k, vv)
                            refs[atkey].append((pk, fkey, context))
                    else:
                        col = cols[k]
                        if isinstance(v, list):
                            # Note: This assumes list-valued columns are
                            # of datatype string!
                            v = (col.separator or ';').join(
                                col.convert(vv) for vv in v)
                        else:
                            v = col.convert(v) if v is not None else None
                        if i == 0:
                            keys.append(col.name)
                        values.append(v)
                rows.append(tuple(values))
            insert(db, self.translate, t.name, keys, *rows)
        for atkey, rows in refs.items():
            insert(db, self.translate, atkey[0], atkey[1:], *rows)
        db.commit()
def iterrows(lines_or_file, namedtuples=False, dicts=False, encoding='utf-8',
             **kw):
    """Convenience factory function for csv reader.

    :param lines_or_file: Content to be read. Either a file handle, a file \
    path or a list of strings.
    :param namedtuples: Yield namedtuples.
    :param dicts: Yield dicts.
    :param encoding: Encoding of the content.
    :param kw: Keyword parameters are passed through to csv.reader.
    :return: A generator over the rows.
    """
    if namedtuples and dicts:
        raise ValueError(
            'either namedtuples or dicts can be chosen as output format')
    elif namedtuples:
        _reader = NamedTupleReader
    elif dicts:
        _reader = UnicodeDictReader
    else:
        _reader = UnicodeReader

    with _reader(lines_or_file, encoding=encoding, **fix_kw(kw)) as r:
        for item in r:
            yield item
def rewrite(fname, visitor, **kw):
    """Utility function to rewrite rows in tsv files.

    :param fname: Path of the dsv file to operate on.
    :param visitor: A callable that takes a line-number and a row as input \
    and returns a (modified) row or None to filter out the row.
    :param kw: Keyword parameters are passed through to csv.reader/csv.writer.
    """
    if not isinstance(fname, pathlib.Path):
        assert isinstance(fname, string_types)
        fname = pathlib.Path(fname)

    assert fname.is_file()
    with tempfile.NamedTemporaryFile(delete=False) as fp:
        tmp = pathlib.Path(fp.name)

    with UnicodeReader(fname, **kw) as reader_:
        with UnicodeWriter(tmp, **kw) as writer:
            for i, row in enumerate(reader_):
                row = visitor(i, row)
                if row is not None:
                    writer.writerow(row)
    shutil.move(str(tmp), str(fname))
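A usage sketch of the visitor protocol above (the file name is hypothetical): upper-case the first column and drop everything past row 100:

def visitor(lineno, row):
    if lineno > 100:
        return None          # returning None filters the row out
    row[0] = row[0].upper()  # returning a row keeps the (modified) row
    return row

rewrite('data.tsv', visitor, delimiter='\t')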
def filter_rows_as_dict(fname, filter_, **kw):
    """Rewrite a dsv file, filtering the rows.

    :param fname: Path to dsv file.
    :param filter_: callable which accepts a `dict` with a row's data as \
    single argument, returning a `Boolean` indicating whether to keep the \
    row (`True`) or to discard it (`False`).
    :param kw: Keyword arguments to be passed to `UnicodeReader` and \
    `UnicodeWriter`.
    :return: The number of rows that have been removed.
    """
    filter_ = DictFilter(filter_)
    rewrite(fname, filter_, **kw)
    return filter_.removed
def dump_grid(grid):
    """Dump a single grid to its ZINC representation."""
    header = 'ver:%s' % dump_str(str(grid._version), version=grid._version)
    if bool(grid.metadata):
        header += ' ' + dump_meta(grid.metadata, version=grid._version)
    columns = dump_columns(grid.column, version=grid._version)
    rows = dump_rows(grid)
    return '\n'.join([header, columns] + rows + [''])
def parse(grid_str, mode=MODE_ZINC, charset='utf-8'):
    '''
    Parse the given Zinc text and return the equivalent data.
    '''
    # Decode incoming text (or python3 will whine!)
    if isinstance(grid_str, six.binary_type):
        grid_str = grid_str.decode(encoding=charset)

    # Split the separate grids up, the grammar definition has trouble
    # splitting them up normally.  This will truncate the newline off
    # the end of the last row.
    _parse = functools.partial(parse_grid, mode=mode, charset=charset)
    if mode == MODE_JSON:
        if isinstance(grid_str, six.string_types):
            grid_data = json.loads(grid_str)
        else:
            grid_data = grid_str

        if isinstance(grid_data, dict):
            return _parse(grid_data)
        else:
            return list(map(_parse, grid_data))
    else:
        return list(map(_parse, GRID_SEP.split(grid_str.rstrip())))
def append(self, key, value=MARKER, replace=True):
    '''
    Append the item to the metadata.
    '''
    return self.add_item(key, value, replace=replace)

def extend(self, items, replace=True):
    '''
    Append the items to the metadata.
    '''
    if isinstance(items, (dict, SortableDict)):
        items = list(items.items())
    for (key, value) in items:
        self.append(key, value, replace=replace)
def regular_polygon(cls, center, radius, n_vertices, start_angle=0,
                    **kwargs):
    """Construct a regular polygon.

    Parameters
    ----------
    center : array-like
    radius : float
    n_vertices : int
    start_angle : float, optional
        Where to put the first point, relative to `center`,
        in radians counter-clockwise starting from the horizontal axis.
    kwargs
        Other keyword arguments are passed to the |Shape| constructor.

    """
    angles = (np.arange(n_vertices) * 2 * np.pi / n_vertices) + start_angle
    return cls(center + radius * np.array([np.cos(angles),
                                           np.sin(angles)]).T,
               **kwargs)
def circle(cls, center, radius, n_vertices=50, **kwargs):
    """Construct a circle.

    Parameters
    ----------
    center : array-like
    radius : float
    n_vertices : int, optional
        Number of points to draw.
        Decrease for performance, increase for appearance.
    kwargs
        Other keyword arguments are passed to the |Shape| constructor.

    """
    return cls.regular_polygon(center, radius, n_vertices, **kwargs)
def rectangle(cls, vertices, **kwargs):
    """Shortcut for creating a rectangle aligned with the screen axes
    from only two corners.

    Parameters
    ----------
    vertices : array-like
        An array containing the ``[x, y]`` positions of two corners.
    kwargs
        Other keyword arguments are passed to the |Shape| constructor.

    """
    bottom_left, top_right = vertices
    top_left = [bottom_left[0], top_right[1]]
    bottom_right = [top_right[0], bottom_left[1]]
    return cls([bottom_left, bottom_right, top_right, top_left], **kwargs)
def from_dict(cls, spec):
    """Create a |Shape| from a dictionary specification.

    Parameters
    ----------
    spec : dict
        A dictionary with either the fields ``'center'`` and ``'radius'``
        (for a circle), ``'center'``, ``'radius'``, and ``'n_vertices'``
        (for a regular polygon), or ``'vertices'``.
        If only two vertices are given, they are assumed to be the
        lower left and top right corners of a rectangle.
        Other fields are interpreted as keyword arguments.

    """
    spec = spec.copy()
    center = spec.pop('center', None)
    radius = spec.pop('radius', None)
    if center and radius:
        return cls.circle(center, radius, **spec)

    vertices = spec.pop('vertices')
    if len(vertices) == 2:
        return cls.rectangle(vertices, **spec)

    return cls(vertices, **spec)
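A sketch of the three spec shapes the constructor above accepts (``Shape`` is the class these classmethods belong to; the values are hypothetical):

circle = Shape.from_dict({'center': [0, 0], 'radius': 3})
hexagon = Shape.from_dict({'center': [0, 0], 'radius': 3, 'n_vertices': 6})
rect = Shape.from_dict({'vertices': [[0, 0], [4, 2]]})  # two corners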
def _kwargs(self):
    """Keyword arguments for recreating the Shape from the vertices."""
    return dict(color=self.color, velocity=self.velocity,
                colors=self.colors)
def scale(self, factor, center=None):
    """Resize the shape by a proportion (e.g., 1 is unchanged), in-place.

    Parameters
    ----------
    factor : float or array-like
        If a scalar, the same factor will be applied in the x and y
        dimensions.
    center : array-like, optional
        Point around which to perform the scaling.
        If not passed, the center of the shape is used.

    """
    factor = np.asarray(factor)
    if len(factor.shape):
        args = list(factor)
    else:
        args = [factor, factor]
    if center is not None:
        args.extend(center)
    self.poly.scale(*args)
    return self
def rotate(self, angle, center=None):
    """Rotate the shape, in-place.

    Parameters
    ----------
    angle : float
        Angle to rotate, in radians counter-clockwise.
    center : array-like, optional
        Point about which to rotate.
        If not passed, the center of the shape will be used.

    """
    args = [angle]
    if center is not None:
        args.extend(center)
    self.poly.rotate(*args)
    return self
def flip_x(self, center=None):
    """Flip the shape in the x direction, in-place.

    Parameters
    ----------
    center : array-like, optional
        Point about which to flip.
        If not passed, the center of the shape will be used.

    """
    if center is None:
        self.poly.flip()
    else:
        self.poly.flip(center[0])
    # Return self like the other in-place methods, so that calls chain.
    return self
def flip_y(self, center=None):
    """Flip the shape in the y direction, in-place.

    Parameters
    ----------
    center : array-like, optional
        Point about which to flip.
        If not passed, the center of the shape will be used.

    """
    if center is None:
        self.poly.flop()
    else:
        self.poly.flop(center[1])
    return self
def flip(self, angle, center=None):
    """Flip the shape in an arbitrary direction.

    Parameters
    ----------
    angle : float
        The angle, in radians counter-clockwise from the horizontal axis,
        of a line through `center` about which to flip the shape.
    center : array-like, optional
        The point about which to flip.
        If not passed, the center of the shape will be used.

    """
    # Rotate the flip axis onto the horizontal, mirror, then rotate back.
    return (self.rotate(-angle, center=center)
            .flip_y(center=center)
            .rotate(angle, center=center))
def draw(self):
    """Draw the shape in the current OpenGL context."""
    if self.enabled:
        self._vertex_list.colors = self._gl_colors
        self._vertex_list.vertices = self._gl_vertices
        self._vertex_list.draw(pyglet.gl.GL_TRIANGLES)
def update(self, dt):
    """Update the shape's position by moving it forward according to its
    velocity.

    Parameters
    ----------
    dt : float
        Time elapsed since the last update, in seconds.

    """
    self.translate(dt * self.velocity)
    self.rotate(dt * self.angular_velocity)
def _map_timezones():
    """
    Map the official Haystack timezone list to those recognised by pytz.
    """
    tz_map = {}
    todo = HAYSTACK_TIMEZONES_SET.copy()
    for full_tz in pytz.all_timezones:
        # Finished case:
        if not bool(todo):  # pragma: no cover
            # This is nearly impossible for us to cover, and an
            # unlikely case.
            break
        # Case 1: exact match
        if full_tz in todo:
            tz_map[full_tz] = full_tz  # Exact match
            todo.discard(full_tz)
            continue
        # Case 2: suffix match after '/'
        if '/' not in full_tz:
            continue
        (prefix, suffix) = full_tz.split('/', 1)
        # Case 2 exception: full timezone contains more than one '/' ->
        # ignore
        if '/' in suffix:
            continue
        if suffix in todo:
            tz_map[suffix] = full_tz
            todo.discard(suffix)
            continue
    return tz_map
def timezone(haystack_tz, version=LATEST_VER):
    """
    Retrieve the Haystack timezone.
    """
    tz_map = get_tz_map(version=version)
    try:
        tz_name = tz_map[haystack_tz]
    except KeyError:
        raise ValueError('%s is not a recognised timezone on this host'
                         % haystack_tz)
    return pytz.timezone(tz_name)
def timezone_name(dt, version=LATEST_VER):
    """
    Determine an appropriate timezone for the given date/time object.
    """
    tz_rmap = get_tz_rmap(version=version)
    if dt.tzinfo is None:
        raise ValueError('%r has no timezone' % dt)

    # Easy case: pytz timezone.
    try:
        tz_name = dt.tzinfo.zone
        return tz_rmap[tz_name]
    except KeyError:
        # Not in timezone map
        pass
    except AttributeError:
        # Not a pytz-compatible tzinfo
        pass

    # Hard case, try to find one that's equivalent.  Hopefully we don't get
    # many of these.  Start by getting the current timezone offset, and a
    # timezone-naïve copy of the timestamp.
    offset = dt.utcoffset()
    dt_notz = dt.replace(tzinfo=None)

    if offset == datetime.timedelta(0):
        # UTC?
        return 'UTC'

    for olson_name, haystack_name in list(tz_rmap.items()):
        if pytz.timezone(olson_name).utcoffset(dt_notz) == offset:
            return haystack_name

    raise ValueError('Unable to get timezone of %r' % dt)
def _unescape(s, uri=False):
    """
    Iterative parser for string escapes.
    """
    out = ''
    while len(s) > 0:
        c = s[0]
        if c == '\\':
            # Backslash escape
            esc_c = s[1]

            if esc_c in ('u', 'U'):
                # Unicode escape
                out += six.unichr(int(s[2:6], base=16))
                s = s[6:]
                continue
            else:
                if esc_c == 'b':
                    out += '\b'
                elif esc_c == 'f':
                    out += '\f'
                elif esc_c == 'n':
                    out += '\n'
                elif esc_c == 'r':
                    out += '\r'
                elif esc_c == 't':
                    out += '\t'
                else:
                    if uri and (esc_c == '#'):
                        # \# is passed through with backslash.
                        out += '\\'
                    # Pass through
                    out += esc_c
                s = s[2:]
                continue
        else:
            out += c
            s = s[1:]
    return out
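A quick sketch of the escape handling above (the assertions are hypothetical checks, not part of the original code):

assert _unescape(r'a\nb') == 'a\nb'            # C-style newline escape
assert _unescape(r'\u00e9') == u'\u00e9'       # 4-digit unicode escape
assert _unescape(r'\#x', uri=True) == r'\#x'   # URI mode keeps the backslash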
def parse_grid(grid_data):
    """
    Parse the incoming grid.
    """
    try:
        # Split the grid up.
        grid_parts = NEWLINE_RE.split(grid_data)
        if len(grid_parts) < 2:
            raise ZincParseException('Malformed grid received',
                                     grid_data, 1, 1)

        # Grid and column metadata are the first two lines.
        grid_meta_str = grid_parts.pop(0)
        col_meta_str = grid_parts.pop(0)

        # First element is the grid metadata
        ver_match = VERSION_RE.match(grid_meta_str)
        if ver_match is None:
            raise ZincParseException(
                'Could not determine version from %r' % grid_meta_str,
                grid_data, 1, 1)
        version = Version(ver_match.group(1))

        # Now parse the rest of the grid accordingly
        try:
            grid_meta = hs_gridMeta[version].parseString(
                grid_meta_str, parseAll=True)[0]
        except pp.ParseException as pe:
            # Raise a new exception with the appropriate line number.
            raise ZincParseException(
                'Failed to parse grid metadata: %s' % pe,
                grid_data, 1, pe.col)
        except:  # pragma: no cover
            # Report an error to the log if we fail to parse something.
            LOG.debug('Failed to parse grid meta: %r', grid_meta_str)
            raise

        try:
            col_meta = hs_cols[version].parseString(
                col_meta_str, parseAll=True)[0]
        except pp.ParseException as pe:
            # Raise a new exception with the appropriate line number.
            raise ZincParseException(
                'Failed to parse column metadata: %s'
                % reformat_exception(pe, 2),
                grid_data, 2, pe.col)
        except:  # pragma: no cover
            # Report an error to the log if we fail to parse something.
            LOG.debug('Failed to parse column meta: %r', col_meta_str)
            raise

        row_grammar = hs_row[version]

        def _parse_row(row_num_and_data):
            (row_num, row) = row_num_and_data
            line_num = row_num + 3
            try:
                return dict(zip(
                    col_meta.keys(),
                    row_grammar.parseString(row,
                                            parseAll=True)[0].asList()))
            except pp.ParseException as pe:
                # Raise a new exception with the appropriate line number.
                raise ZincParseException(
                    'Failed to parse row: %s'
                    % reformat_exception(pe, line_num),
                    grid_data, line_num, pe.col)
            except:  # pragma: no cover
                # Report an error to the log if we fail to parse something.
                LOG.debug('Failed to parse row: %r', row)
                raise

        g = Grid(version=grid_meta.pop('ver'),
                 metadata=grid_meta,
                 columns=list(col_meta.items()))
        g.extend(map(_parse_row,
                     filter(lambda gp: bool(gp[1]),
                            enumerate(grid_parts))))
        return g
    except:
        LOG.debug('Failing grid: %r', grid_data)
        raise
def parse_scalar(scalar_data, version):
    """
    Parse a Project Haystack scalar in ZINC format.
    """
    try:
        return hs_scalar[version].parseString(scalar_data, parseAll=True)[0]
    except pp.ParseException as pe:
        # Raise a new exception with the appropriate line number.
        raise ZincParseException(
            'Failed to parse scalar: %s' % reformat_exception(pe),
            scalar_data, 1, pe.col)
    except:
        LOG.debug('Failing scalar data: %r (version %r)',
                  scalar_data, version)
        raise  # re-raise; otherwise the error would be silently swallowed
def add_item(self, key, value, after=False, index=None, pos_key=None,
             replace=True):
    """
    Add an item at a specific location, possibly replacing the
    existing item.

    If after is True, we insert *after* the given index, otherwise we
    insert before.

    The position is specified using either index or pos_key, the former
    specifies the position from the start of the array (base 0).  pos_key
    specifies the name of another key, and positions the new key relative
    to that key.

    When replacing, the position will be left un-changed unless a location
    is specified explicitly.
    """
    if self._validate_fn:
        self._validate_fn(value)

    if (index is not None) and (pos_key is not None):
        raise ValueError('Either specify index or pos_key, not both.')
    elif pos_key is not None:
        try:
            index = self.index(pos_key)
        except ValueError:
            raise KeyError('%r not found' % pos_key)

    if after and (index is not None):
        # insert inserts *before* index, so increment by one.
        index += 1

    if key in self._values:
        if not replace:
            raise KeyError('%r is duplicate' % key)

        if index is not None:
            # We are re-locating.
            del self[key]
        else:
            # We are updating
            self._values[key] = value
            return

    if index is not None:
        # Place at given position
        self._order.insert(index, key)
    else:
        # Place at end
        self._order.append(key)
    self._values[key] = value
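A sketch of the positioning options above (the SortableDict instance and keys are hypothetical):

sd = SortableDict()
sd.add_item('a', 1)
sd.add_item('c', 3)
sd.add_item('b', 2, pos_key='c')               # insert before 'c'
sd.add_item('d', 4, pos_key='c', after=True)   # insert after 'c'
# key order is now: a, b, c, d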
def dump(grids, mode=MODE_ZINC):
    """
    Dump the given grids in the specified over-the-wire format.
    """
    if isinstance(grids, Grid):
        return dump_grid(grids, mode=mode)
    _dump = functools.partial(dump_grid, mode=mode)
    if mode == MODE_ZINC:
        return '\n'.join(map(_dump, grids))
    elif mode == MODE_JSON:
        return '[%s]' % ','.join(map(_dump, grids))
    else:  # pragma: no cover
        raise NotImplementedError('Format not implemented: %s' % mode)
def to_haystack(unit):
    """
    Some parsing tweaks to fit pint units / handling of edge cases.
    """
    global HAYSTACK_CONVERSION
    global PINT_CONVERSION
    # Check for None *before* the str() call: str(None) is 'None', so the
    # original `unit == None` comparison after the conversion never matched.
    if unit is None:
        return ''
    unit = str(unit)
    if unit in ('per_minute', '/min', 'per_second', '/s',
                'per_hour', '/h'):
        # Those units are not units... they are impossible to fit
        # anywhere in Pint.
        return ''
    for pint_value, haystack_value in PINT_CONVERSION:
        unit = unit.replace(pint_value, haystack_value)
    for haystack_value, pint_value in HAYSTACK_CONVERSION:
        if pint_value == '':
            continue
        unit = unit.replace(pint_value, haystack_value)
    return unit
def to_pint(unit):
    """
    Some parsing tweaks to fit pint units / handling of edge cases.
    """
    global HAYSTACK_CONVERSION
    if unit is None or unit in ('per_minute', '/min', 'per_second', '/s',
                                'per_hour', '/h'):
        # Those units are not units... they are impossible to fit
        # anywhere in Pint.
        return ''
    for haystack_value, pint_value in HAYSTACK_CONVERSION:
        unit = unit.replace(haystack_value, pint_value)
    return unit
def define_haystack_units():
    """
    Missing units found in project-haystack, added to the registry.
    """
    ureg = UnitRegistry()
    ureg.define('% = [] = percent')
    ureg.define('pixel = [] = px = dot = picture_element = pel')
    ureg.define('decibel = [] = dB')
    ureg.define('ppu = [] = parts_per_unit')
    ureg.define('ppm = [] = parts_per_million')
    ureg.define('ppb = [] = parts_per_billion')
    ureg.define('%RH = [] = percent_relative_humidity = percentRH')
    ureg.define('cubic_feet = ft ** 3 = cu_ft')
    ureg.define('cfm = cu_ft * minute = liter_per_second / 0.4719475')
    ureg.define('cfh = cu_ft * hour')
    ureg.define('cfs = cu_ft * second')
    ureg.define('VAR = volt * ampere')
    ureg.define('kVAR = 1000 * volt * ampere')
    ureg.define('MVAR = 1000000 * volt * ampere')
    ureg.define('inH2O = in_H2O')
    ureg.define('dry_air = []')
    ureg.define('gas = []')
    ureg.define('energy_efficiency_ratio = [] = EER')
    ureg.define('coefficient_of_performance = [] = COP')
    ureg.define('data_center_infrastructure_efficiency = [] = DCIE')
    ureg.define('power_usage_effectiveness = [] = PUE')
    ureg.define('formazin_nephelometric_unit = [] = fnu')
    ureg.define('nephelometric_turbidity_units = [] = ntu')
    ureg.define('power_factor = [] = PF')
    ureg.define('degree_day_celsius = [] = degdaysC')
    ureg.define('degree_day_farenheit = degree_day_celsius * 9 / 5 = degdaysF')
    ureg.define('footcandle = lumen / sq_ft = ftcd')
    ureg.define('Nm = newton * meter')
    ureg.define('%obsc = [] = percent_obscuration = percentobsc')
    ureg.define('cycle = []')
    ureg.define('cph = cycle / hour')
    ureg.define('cpm = cycle / minute')
    ureg.define('cps = cycle / second')
    ureg.define('hecto_cubic_foot = 100 * cubic_foot')
    ureg.define('tenths_second = second / 10')
    ureg.define('hundredths_second = second / 100')

    # ureg.define('irradiance = W / sq_meter = irr')
    # In the definition of project haystack, there's a redundancy as
    # irr = W/m^2; no need to use watts_per_square_meter_irradiance

    # CURRENCY
    # I know... we won't be able to convert right now!
    ureg.define('australian_dollar = [] = AUD')
    ureg.define('british_pound = [] = GBP = £')
    ureg.define('canadian_dollar = [] = CAD')
    ureg.define('chinese_yuan = [] = CNY = 元')
    ureg.define('emerati_dirham = [] = AED')
    ureg.define('euro = [] = EUR = €')
    ureg.define('indian_rupee = [] = INR = ₹')
    ureg.define('japanese_yen = [] = JPY = ¥')
    ureg.define('russian_ruble = [] = RUB = руб')
    ureg.define('south_korean_won = [] = KRW = ₩')
    ureg.define('swedish_krona = [] = SEK = kr')
    ureg.define('swiss_franc = [] = CHF = Fr')
    ureg.define('taiwan_dollar = [] = TWD')
    ureg.define('us_dollar = [] = USD = $')
    ureg.define('new_israeli_shekel = [] = NIS')

    return ureg
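A quick usage sketch of the registry returned above (the values are hypothetical):

ureg = define_haystack_units()
torque = 3 * ureg.Nm
print(torque.to('joule'))        # Nm is defined as newton * meter -> 3.0 joule
humidity = 55 * ureg.percentRH   # a custom dimensionless unit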
def _detect_or_validate(self, val):
    '''
    Detect the version used from the row content, or validate against
    the version if given.
    '''
    if isinstance(val, (list, dict, SortableDict, Grid)):
        # Project Haystack 3.0 type.
        self._assert_version(VER_3_0)
def _assert_version(self, version):
    '''
    Assert that the grid version is equal to or above the given value.

    If no version is set, set the version.
    '''
    if self.nearest_version < version:
        if self._version_given:
            raise ValueError(
                'Data type requires version %s' % version)
        else:
            self._version = version
def _cmp(self, other):
    """
    Compare two Project Haystack version strings, then return
    -1 if self < other, 0 if self == other or 1 if self > other.
    """
    if not isinstance(other, Version):
        other = Version(other)

    num1 = self.version_nums
    num2 = other.version_nums

    # Pad both to be the same length
    ver_len = max(len(num1), len(num2))
    num1 += tuple([0 for n in range(len(num1), ver_len)])
    num2 += tuple([0 for n in range(len(num2), ver_len)])

    # Compare the versions
    for (p1, p2) in zip(num1, num2):
        if p1 < p2:
            return -1
        elif p1 > p2:
            return 1

    # All the same, compare the extra strings.  If a version misses the
    # extra part, we consider that as coming *before*.
    if self.version_extra is None:
        if other.version_extra is None:
            return 0
        else:
            return -1
    elif other.version_extra is None:
        return 1
    elif self.version_extra == other.version_extra:
        return 0
    elif self.version_extra < other.version_extra:
        return -1
    else:
        return 1
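A sketch of the ordering this comparison implements, assuming the class wires _cmp into the rich comparison operators and that a suffix such as '-beta' is parsed into version_extra (the version strings are hypothetical):

assert Version('2.0') == Version('2.0.0')    # shorter tuple is padded with zeroes
assert Version('3.0') > Version('2.5')
assert Version('2.0') < Version('2.0-beta')  # missing extra part sorts first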
def nearest(self, ver):
    """
    Retrieve the official version nearest the one given.
    """
    if not isinstance(ver, Version):
        ver = Version(ver)

    if ver in OFFICIAL_VERSIONS:
        return ver

    # We might not have an exact match for that.
    # See if we have one that's newer than the grid we're looking at.
    versions = list(OFFICIAL_VERSIONS)
    versions.sort(reverse=True)

    best = None
    for candidate in versions:
        # Due to ambiguities, we might have an exact match and not know it.
        # '2.0' will not hash to the same value as '2.0.0', but both are
        # equivalent.
        if candidate == ver:
            # We can't beat this, make a note of the match for later
            return candidate

        # If we have not seen a better candidate, and this is older
        # then we may have to settle for that.
        if (best is None) and (candidate < ver):
            warnings.warn('This version of hszinc does not yet '
                          'support version %s, please seek a newer version '
                          'or file a bug.  Closest (older) version '
                          'supported is %s.' % (ver, candidate))
            return candidate

        # Probably the best so far, but see if we can go closer
        if candidate > ver:
            best = candidate

    # Unhappy path, no best option?  This should not happen.
    assert best is not None
    warnings.warn('This version of hszinc does not yet '
                  'support version %s, please seek a newer version '
                  'or file a bug.  Closest (newer) version supported '
                  'is %s.' % (ver, best))
    return best
def encrypt_files(selected_host, only_link, file_name):
    """Encrypt a file with gpg and a randomly generated passphrase."""
    if ENCRYPTION_DISABLED:
        print('For encryption please install gpg')
        exit()
    passphrase = '%030x' % random.randrange(16**30)
    source_filename = file_name
    cmd = 'gpg --batch --symmetric --cipher-algo AES256 --passphrase-fd 0 ' \
          '--output - {}'.format(source_filename)
    encrypted_output = Popen(shlex.split(cmd), stdout=PIPE,
                             stdin=PIPE, stderr=PIPE)
    encrypted_data = encrypted_output.communicate(passphrase.encode())[0]
    return upload_files(encrypted_data, selected_host,
                        only_link, file_name) + '#' + passphrase
def check_max_filesize(chosen_file, max_size):
    """Check that the file fits within the host's size limit."""
    return os.path.getsize(chosen_file) <= max_size
def parse_arguments(args, clone_list):
    """Make parsing arguments a function."""
    returned_string = ""
    host_number = args.host
    if args.show_list:
        print(generate_host_string(clone_list, "Available hosts: "))
        exit()
    if args.decrypt:
        for i in args.files:
            print(decrypt_files(i))
        exit()
    if args.files:
        for i in args.files:
            if args.limit_size:
                if args.host == host_number and host_number is not None:
                    if not check_max_filesize(i,
                                              clone_list[host_number][3]):
                        host_number = None
                for n, host in enumerate(clone_list):
                    if not check_max_filesize(i, host[3]):
                        clone_list[n] = None
                # Drop the hosts ruled out above, so the emptiness check
                # below can actually trigger.
                clone_list = list(filter(None, clone_list))
                if not clone_list:
                    print('None of the clones is able to support such a '
                          'big file.')
            if args.no_cloudflare:
                if args.host == host_number and host_number is not None \
                        and not clone_list[host_number][4]:
                    print("This host uses Cloudflare, "
                          "please choose different host.")
                    exit(1)
                else:
                    for n, host in enumerate(clone_list):
                        if not host[4]:
                            clone_list[n] = None
                    clone_list = list(filter(None, clone_list))
            if host_number is None or args.host != host_number:
                host_number = random.randrange(0, len(clone_list))
            while True:
                try:
                    if args.encrypt:
                        returned_string = encrypt_files(
                            clone_list[host_number], args.only_link, i)
                    else:
                        returned_string = upload_files(
                            open(i, 'rb'),
                            clone_list[host_number], args.only_link, i)
                    if args.only_link:
                        print(returned_string[0])
                    else:
                        print(returned_string)
                except IndexError:
                    # Selected server is offline; try another host.
                    host_number = random.randrange(0, len(clone_list))
                    continue
                except IsADirectoryError:
                    print('limf does not support directory upload, if you '
                          'want to upload every file in directory use '
                          'limf {}/*.'.format(i.replace('/', '')))
                if args.log:
                    with open(os.path.expanduser(args.logfile),
                              "a+") as logfile:
                        if args.only_link:
                            logfile.write(returned_string[1])
                        else:
                            logfile.write(returned_string)
                        logfile.write("\n")
                break
    else:
        print("limf: try 'limf -h' for more information")
def upload_files(selected_file, selected_host, only_link, file_name):
    """Upload the selected file to the host.

    This works because every pomf.se based site has pretty much the same
    architecture, so the same upload call fits all of them.
    """
    try:
        answer = requests.post(
            url=selected_host[0] + "upload.php",
            files={'files[]': selected_file})
        file_name_1 = re.findall(r'"url": *"((h.+\/){0,1}(.+?))"[,\}]',
                                 answer.text.replace("\\", ""))[0][2]
        if only_link:
            return [selected_host[1] + file_name_1,
                    "{}: {}{}".format(file_name, selected_host[1],
                                      file_name_1)]
        else:
            return "{}: {}{}".format(file_name, selected_host[1],
                                     file_name_1)
    except requests.exceptions.ConnectionError:
        print(file_name + ' couldn\'t be uploaded to ' + selected_host[0])
def swagger_ui_template_view(request):
    """
    Serve the Swagger UI page.

    The default Swagger UI config is used, but you can override the callable
    that generates the ``<script>`` tag by setting
    ``cornice_swagger.swagger_ui_script_generator`` in the pyramid config;
    it defaults to 'cornice_swagger.views:swagger_ui_script_template'.

    :param request:
    :return:
    """
    script_generator = request.registry.settings.get(
        'cornice_swagger.swagger_ui_script_generator',
        'cornice_swagger.views:swagger_ui_script_template')
    package, callable = script_generator.split(':')
    imported_package = importlib.import_module(package)
    script_callable = getattr(imported_package, callable)
    template = pkg_resources.resource_string(
        'cornice_swagger',
        'templates/index.html').decode('utf8')

    html = Template(template).safe_substitute(
        ui_css_url=ui_css_url,
        ui_js_bundle_url=ui_js_bundle_url,
        ui_js_standalone_url=ui_js_standalone_url,
        swagger_ui_script=script_callable(request),
    )
    return Response(html)
def open_api_json_view(request):
    """
    Generate a JSON representation of the Swagger spec.

    :param request:
    :return:
    """
    doc = cornice_swagger.CorniceSwagger(
        cornice.service.get_services(), pyramid_registry=request.registry)
    kwargs = request.registry.settings['cornice_swagger.spec_kwargs']
    my_spec = doc.generate(**kwargs)
    return my_spec
def swagger_ui_script_template(request, **kwargs):
    """
    Generate the <script> code that bootstraps Swagger UI; it will be
    injected into the index template.

    :param request:
    :return:
    """
    swagger_spec_url = request.route_url('cornice_swagger.open_api_path')
    template = pkg_resources.resource_string(
        'cornice_swagger',
        'templates/index_script_template.html'
    ).decode('utf8')
    return Template(template).safe_substitute(
        swagger_spec_url=swagger_spec_url,
    )
def decrypt_files(file_link):
    """Decrypt a file from the entered link."""
    if ENCRYPTION_DISABLED:
        print('For decryption please install gpg')
        exit()
    try:
        parsed_link = re.findall(r'(.*/(.*))#(.{30})', file_link)[0]
        req = urllib.request.Request(
            parsed_link[0],
            data=None,
            headers={
                'User-Agent':
                    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) '
                    ' AppleWebKit/537.36 (KHTML, like Gecko) '
                    'Chrome/35.0.1916.47 Safari/537.36'
            }
        )
        # downloads the file using a fake user agent
        file_response = urllib.request.urlopen(req)
        file_to_decrypt = file_response.read()

        # decrypts the data by piping it to gpg
        decrypt_r, decrypt_w = os.pipe()
        cmd = 'gpg --batch --decrypt --passphrase-fd {}'.format(decrypt_r)
        decrypt_output = Popen(shlex.split(cmd), stdout=PIPE, stdin=PIPE,
                               stderr=PIPE, pass_fds=(decrypt_r,))
        os.close(decrypt_r)
        open(decrypt_w, 'w').write(parsed_link[2])
        decrypted_data, stderr = decrypt_output.communicate(file_to_decrypt)

        with open(parsed_link[1], 'wb') as decrypted_file:
            decrypted_file.write(decrypted_data)
        return parsed_link[1] + ' is decrypted and saved.'
    except IndexError:
        return 'Please enter valid link.'
def set_value(request):
    """Set the value and return the stored value."""
    key = request.matchdict['key']
    _VALUES[key] = request.json_body
    return _VALUES.get(key)
def from_schema(self, schema_node, base_name=None):
    """
    Creates a Swagger definition from a colander schema.

    :param schema_node:
        Colander schema to be transformed into a Swagger definition.
    :param base_name:
        Schema alternative title.

    :rtype: dict
    :returns: Swagger schema.
    """
    return self._ref_recursive(
        self.type_converter(schema_node), self.ref, base_name)
def _ref_recursive(self, schema, depth, base_name=None):
    """
    Dismantle nested swagger schemas into several definitions using
    JSON pointers.

    Note: This can be dangerous since definition titles must be unique.

    :param schema:
        Base swagger schema.
    :param depth:
        How many levels of the swagger object schemas should be split into
        swagger definitions with JSON pointers. Default (0) is no split.
        You may use negative values to split everything.
    :param base_name:
        If schema doesn't have a name, the caller may provide it to be
        used as reference.

    :rtype: dict
    :returns:
        JSON pointer to the root definition schema,
        or the original definition if depth is zero.
    """
    if depth == 0:
        return schema

    if schema['type'] != 'object':
        return schema

    name = base_name or schema['title']

    pointer = self.json_pointer + name
    for child_name, child in schema.get('properties', {}).items():
        schema['properties'][child_name] = self._ref_recursive(
            child, depth - 1)

    self.definition_registry[name] = schema

    return {'$ref': pointer}
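A sketch of how depth drives the splitting; the schema, the converter instance, and the assumption that json_pointer is '#/definitions/' are all hypothetical:

user_schema = {
    'type': 'object', 'title': 'User',
    'properties': {
        'address': {'type': 'object', 'title': 'Address',
                    'properties': {'city': {'type': 'string'}}},
    },
}
# With a negative depth, every nested object becomes its own definition:
ref = converter._ref_recursive(user_schema, -1)
# ref == {'$ref': '#/definitions/User'}, and both 'User' and 'Address'
# are now registered in converter.definition_registry.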
def from_schema(self, schema_node):
    """
    Creates a list of Swagger params from a colander request schema.

    :param schema_node:
        Request schema to be transformed into Swagger.

    :rtype: list
    :returns: List of Swagger parameters.
    """
    params = []

    for param_schema in schema_node.children:
        location = param_schema.name
        # Compare with `==`; the original `is 'body'` only worked by
        # relying on string interning.
        if location == 'body':
            name = param_schema.__class__.__name__
            if name == 'body':
                name = schema_node.__class__.__name__ + 'Body'
            param = self.parameter_converter(location, param_schema)
            param['name'] = name
            if self.ref:
                param = self._ref(param)
            params.append(param)

        elif location in ('path', 'header', 'headers', 'querystring', 'GET'):
            for node_schema in param_schema.children:
                param = self.parameter_converter(location, node_schema)
                if self.ref:
                    param = self._ref(param)
                params.append(param)

    return params
def from_path(self, path):
    """
    Create a list of Swagger path params from a cornice service path.

    :type path: string
    :rtype: list
    """
    path_components = path.split('/')
    param_names = [comp[1:-1] for comp in path_components
                   if comp.startswith('{') and comp.endswith('}')]
    params = []
    for name in param_names:
        param_schema = colander.SchemaNode(colander.String(), name=name)
        param = self.parameter_converter('path', param_schema)
        if self.ref:
            param = self._ref(param)
        params.append(param)
    return params
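A usage sketch of the extractor above (the path and the converter instance are hypothetical):

params = converter.from_path('/users/{user_id}/posts/{post_id}')
# yields two Swagger 'path' parameters, named 'user_id' and 'post_id'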
def _ref(self, param, base_name=None):
    """
    Store a parameter schema and return a reference to it.

    :param param:
        Swagger parameter definition.
    :param base_name:
        Name that should be used for the reference.

    :rtype: dict
    :returns: JSON pointer to the original parameter definition.
    """
    name = base_name or param.get('title', '') or param.get('name', '')

    pointer = self.json_pointer + name
    self.parameter_registry[name] = param

    return {'$ref': pointer}
def from_schema_mapping(self, schema_mapping):
    """
    Creates a Swagger response object from a dict of response schemas.

    :param schema_mapping:
        Dict with entries matching ``{status_code: response_schema}``.

    :rtype: dict
    :returns: Response schema.
    """
    responses = {}

    for status, response_schema in schema_mapping.items():
        response = {}
        if response_schema.description:
            response['description'] = response_schema.description
        else:
            raise CorniceSwaggerException(
                'Responses must have a description.')

        for field_schema in response_schema.children:
            location = field_schema.name

            if location == 'body':
                title = field_schema.__class__.__name__
                if title == 'body':
                    title = response_schema.__class__.__name__ + 'Body'
                field_schema.title = title
                response['schema'] = self.definitions.from_schema(
                    field_schema)

            elif location in ('header', 'headers'):
                header_schema = self.type_converter(field_schema)
                headers = header_schema.get('properties')

                if headers:
                    # Response headers don't accept titles
                    for header in headers.values():
                        header.pop('title')

                    response['headers'] = headers

        pointer = response_schema.__class__.__name__
        if self.ref:
            response = self._ref(response, pointer)
        responses[status] = response

    return responses
def _ref(self, resp, base_name=None):
    """
    Store a response schema and return a reference to it.

    :param resp:
        Swagger response definition.
    :param base_name:
        Name that should be used for the reference.

    :rtype: dict
    :returns: JSON pointer to the original response definition.
    """

    name = base_name or resp.get('title', '') or resp.get('name', '')

    pointer = self.json_pointer + name
    self.response_registry[name] = resp

    return {'$ref': pointer}
def generate(self, title=None, version=None, base_path=None,
             info=None, swagger=None, **kwargs):
    """Generate a Swagger 2.0 documentation. Extra keyword arguments may
    be used to provide additional information to the build methods, such
    as ignore lists.

    :param title:
        The name presented on the swagger document.
    :param version:
        The version of the API presented on the swagger document.
    :param base_path:
        The path that all requests to the API must refer to.
    :param info:
        Swagger info field.
    :param swagger:
        Extra fields that should be provided on the swagger documentation.

    :rtype: dict
    :returns: Full OpenAPI/Swagger compliant specification for the application.
    """
    title = title or self.api_title
    version = version or self.api_version
    info = info or self.swagger.get('info', {})
    swagger = swagger or self.swagger
    base_path = base_path or self.base_path

    swagger = swagger.copy()
    info.update(title=title, version=version)
    swagger.update(swagger='2.0', info=info, basePath=base_path)

    paths, tags = self._build_paths()

    # Update the provided tags with the extracted ones, preserving order
    if tags:
        swagger.setdefault('tags', [])
        tag_names = {t['name'] for t in swagger['tags']}
        for tag in tags:
            if tag['name'] not in tag_names:
                swagger['tags'].append(tag)

    # Create/update swagger sections with extracted values where not provided
    if paths:
        swagger.setdefault('paths', {})
        merge_dicts(swagger['paths'], paths)

    definitions = self.definitions.definition_registry
    if definitions:
        swagger.setdefault('definitions', {})
        merge_dicts(swagger['definitions'], definitions)

    parameters = self.parameters.parameter_registry
    if parameters:
        swagger.setdefault('parameters', {})
        merge_dicts(swagger['parameters'], parameters)

    responses = self.responses.response_registry
    if responses:
        swagger.setdefault('responses', {})
        merge_dicts(swagger['responses'], responses)

    return swagger
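# A hedged end-to-end sketch, assuming the cornice and cornice_swagger
# packages this code appears to belong to; the CorniceSwagger class name
# and get_services() helper are taken from that ecosystem's documented
# usage, not from this snippet itself.
from cornice import Service
from cornice.service import get_services
from cornice_swagger import CorniceSwagger

hello = Service(name='hello', path='/hello', description='Greetings')

@hello.get()
def get_hello(request):
    """Return a greeting."""
    return {'hello': 'world'}

generator = CorniceSwagger(get_services())
spec = generator.generate(title='Hello API', version='1.0.0',
                          base_path='/api')
# spec['swagger'] == '2.0' and spec['paths'] contains '/hello'.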
def _build_paths(self):
    """
    Build the Swagger "paths" and "tags" attributes from cornice service
    definitions.
    """
    paths = {}
    tags = []

    for service in self.services:
        path, path_obj = self._extract_path_from_service(service)

        service_tags = getattr(service, 'tags', [])
        self._check_tags(service_tags)
        tags = self._get_tags(tags, service_tags)

        for method, view, args in service.definitions:

            if method.lower() in map(str.lower, self.ignore_methods):
                continue

            op = self._extract_operation_from_view(view, args)

            if any(ctype in op.get('consumes', []) for ctype in self.ignore_ctypes):
                continue

            # XXX: Swagger doesn't support different schemas for the same
            # method with different content types the way cornice does. If
            # this happens, you may ignore one content type in the
            # documentation; otherwise we raise an exception.
            # Related to https://github.com/OAI/OpenAPI-Specification/issues/146
            previous_definition = path_obj.get(method.lower())
            if previous_definition:
                raise CorniceSwaggerException(("Swagger doesn't support multiple "
                                               "views for the same method. You may "
                                               "ignore one."))

            # If tag not defined and a default tag is provided
            if 'tags' not in op and self.default_tags:
                if callable(self.default_tags):
                    op['tags'] = self.default_tags(service, method)
                else:
                    op['tags'] = self.default_tags

            op_tags = op.get('tags', [])
            self._check_tags(op_tags)

            # Add service tags
            if service_tags:
                new_tags = service_tags + op_tags
                op['tags'] = list(OrderedDict.fromkeys(new_tags))

            # Add method tags to root tags
            tags = self._get_tags(tags, op_tags)

            # If operation id is not defined and a default generator is provided
            if 'operationId' not in op and self.default_op_ids:
                if not callable(self.default_op_ids):
                    raise CorniceSwaggerException('default_op_ids should be a callable.')
                op['operationId'] = self.default_op_ids(service, method)

            # If security options are not defined and a default is provided
            if 'security' not in op and self.default_security:
                if callable(self.default_security):
                    op['security'] = self.default_security(service, method)
                else:
                    op['security'] = self.default_security

            if not isinstance(op.get('security', []), list):
                raise CorniceSwaggerException('security should be a list or callable')

            path_obj[method.lower()] = op
            paths[path] = path_obj

    return paths, tags
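# Sketch of the generator knobs _build_paths consults. The attribute
# names mirror the ones referenced above (ignore_methods, ignore_ctypes,
# default_tags, default_op_ids, default_security); the values and the
# attribute-assignment style are illustrative assumptions.
from cornice.service import get_services
from cornice_swagger import CorniceSwagger

generator = CorniceSwagger(get_services())
generator.ignore_methods = ['HEAD', 'OPTIONS']
generator.ignore_ctypes = ['application/xml']
generator.default_tags = lambda service, method: [service.name]
generator.default_op_ids = (
    lambda service, method: method.lower() + service.name.title())
generator.default_security = [{'apiKey': []}]
spec = generator.generate()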
def _extract_path_from_service(self, service):
    """
    Extract a path object and its parameters from service definitions.

    :param service:
        Cornice service to extract information from.

    :rtype: tuple
    :returns: The path pattern and its path definition object.
    """
    path_obj = {}
    path = service.path
    route_name = getattr(service, 'pyramid_route', None)

    # Handle services that don't create fresh routes: we still need the
    # path, so grab it from the pyramid introspector.
    if route_name:
        # Avoid failure if someone forgets to pass the registry
        registry = self.pyramid_registry or get_current_registry()
        route_intr = registry.introspector.get('routes', route_name)
        if route_intr:
            path = route_intr['pattern']
        else:
            msg = 'Route `{}` is not found by ' \
                  'pyramid introspector'.format(route_name)
            raise ValueError(msg)

    # Handle traverse and subpath as regular parameters
    # docs.pylonsproject.org/projects/pyramid/en/latest/narr/hybrid.html
    for subpath_marker in ('*subpath', '*traverse'):
        path = path.replace(subpath_marker, '{subpath}')

    # Extract path parameters
    parameters = self.parameters.from_path(path)
    if parameters:
        path_obj['parameters'] = parameters

    return path, path_obj
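# Sketch: a service bound to a pre-existing pyramid route instead of a
# fresh path. The generator recovers the pattern from
# registry.introspector as shown above; the route name and pattern here
# are illustrative.
from cornice import Service

user = Service(name='user', pyramid_route='user_route',
               description='User resource')
# Elsewhere in the Pyramid configuration:
#   config.add_route('user_route', '/users/{user_id}')
# The generator then documents the path as '/users/{user_id}'.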
def _extract_operation_from_view(self, view, args):
    """
    Extract swagger operation details from colander view definitions.

    :param view:
        View to extract information from.
    :param args:
        Arguments from the view decorator.

    :rtype: dict
    :returns: Operation definition.
    """

    op = {
        'responses': {
            'default': {
                'description': 'UNDOCUMENTED RESPONSE'
            }
        },
    }

    # If 'produces' is not defined in the view, try to get it from the renderer
    renderer = args.get('renderer', '')

    if 'json' in renderer:  # allows for 'json' or 'simplejson'
        produces = ['application/json']
    elif renderer == 'xml':
        produces = ['text/xml']
    else:
        produces = None

    if produces:
        op.setdefault('produces', produces)

    # Get explicit accepted content-types
    consumes = args.get('content_type')

    if consumes is not None:
        # Convert to a list if it isn't one yet
        consumes = to_list(consumes)

        # It is possible to add callables for content_type, so we have to
        # filter those out, since we cannot evaluate them here.
        consumes = [x for x in consumes if not callable(x)]
        op['consumes'] = consumes

    # Get parameters from the view schema
    is_colander = self._is_colander_schema(args)
    if is_colander:
        schema = self._extract_transform_colander_schema(args)
        parameters = self.parameters.from_schema(schema)
    else:
        # Bail out for now
        parameters = None
    if parameters:
        op['parameters'] = parameters

    # Get the summary from the docstring
    docstring = None
    if isinstance(view, six.string_types):
        if 'klass' in args:
            ob = args['klass']
            view_ = getattr(ob, view.lower())
            docstring = trim(view_.__doc__)
    else:
        docstring = str(trim(view.__doc__))

    if docstring and self.summary_docstrings:
        op['summary'] = docstring

    # Get response definitions
    if 'response_schemas' in args:
        op['responses'] = self.responses.from_schema_mapping(args['response_schemas'])

    # Get operation tags
    if 'tags' in args:
        op['tags'] = args['tags']

    # Get the operationId
    if 'operation_id' in args:
        op['operationId'] = args['operation_id']

    # Get security policies
    if 'api_security' in args:
        op['security'] = args['api_security']

    return op
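# A hedged sketch of the view-decorator arguments this method inspects:
# renderer, content_type, schema, response_schemas, tags, operation_id
# and api_security. The service and schema names are illustrative; the
# decorator API is cornice's.
import colander
from cornice import Service
from cornice.validators import colander_body_validator

class ItemBody(colander.MappingSchema):
    name = colander.SchemaNode(colander.String())

class ItemSchema(colander.MappingSchema):
    body = ItemBody()

class ItemResponse(colander.MappingSchema):
    body = ItemBody()

items = Service(name='items', path='/items')

@items.post(renderer='json',
            content_type='application/json',
            schema=ItemSchema(),
            response_schemas={'200': ItemResponse(description='Item created.')},
            tags=['items'],
            operation_id='createItem',
            api_security=[{'basicAuth': []}],
            validators=(colander_body_validator,))
def create_item(request):
    """Create an item."""  # picked up as the operation summary
    return {}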