def cells_to_series(cells, args):
    """Convert a CellImpl into a Series.

    `args` must be a sequence of argkeys.
    `args` can be longer or shorter than the number of the cells'
    parameters. If shorter, defaults are filled in if any; otherwise an
    error is raised. If longer, redundant args are ignored.
    """
    paramlen = len(cells.formula.parameters)
    is_multidx = paramlen > 1

    if len(cells.data) == 0:
        data = {}
        indexes = None

    elif paramlen == 0:  # Const Cells
        data = list(cells.data.values())
        indexes = [np.nan]

    else:
        if len(args) > 0:
            defaults = tuple(
                param.default
                for param in cells.formula.signature.parameters.values()
            )
            updated_args = []
            for arg in args:
                if len(arg) > paramlen:
                    arg = arg[:paramlen]
                elif len(arg) < paramlen:
                    arg += defaults[len(arg):]
                updated_args.append(arg)

            items = [
                (arg, cells.data[arg])
                for arg in updated_args
                if arg in cells.data
            ]
        else:
            items = [(key, value) for key, value in cells.data.items()]

        if not is_multidx:  # Peel 1-element tuple
            items = [(key[0], value) for key, value in items]

        if len(items) == 0:
            indexes, data = None, {}
        else:
            indexes, data = zip(*items)
            if is_multidx:
                indexes = pd.MultiIndex.from_tuples(indexes)

    result = pd.Series(data=data, name=cells.name, index=indexes)

    if indexes is not None and any(i is not np.nan for i in indexes):
        result.index.names = list(cells.formula.parameters)

    return result
def clear_descendants(self, source, clear_source=True):
    """Remove all descendants of (reachable from) `source`.

    Args:
        source: The source node.
        clear_source(bool): Remove `source` itself too if True.

    Returns:
        set: The removed nodes.
    """
    desc = nx.descendants(self, source)
    if clear_source:
        desc.add(source)
    self.remove_nodes_from(desc)
    return desc
def clear_obj(self, obj):
    """Remove all nodes with `obj` and their descendants."""
    obj_nodes = self.get_nodes_with(obj)
    removed = set()
    for node in obj_nodes:
        if self.has_node(node):
            removed.update(self.clear_descendants(node))
    return removed
def get_nodes_with(self, obj):
    """Return nodes with `obj`."""
    result = set()

    if nx.__version__[0] == "1":
        nodes = self.nodes_iter()
    else:
        nodes = self.nodes

    for node in nodes:
        if node[OBJ] == obj:
            result.add(node)
    return result
def add_path(self, nodes, **attr):
    """Replacement for the deprecated ``add_path`` method."""
    if nx.__version__[0] == "1":
        return super().add_path(nodes, **attr)
    else:
        return nx.add_path(self, nodes, **attr)
def rename(self, name):
    """Rename the model itself."""
    self._impl.system.rename_model(new_name=name, old_name=self.name)
def rename(self, name):
    """Rename self. Must be called only by its system."""
    if is_valid_name(name):
        if name not in self.system.models:
            self.name = name
            return True  # Rename success
        else:  # Model name already exists
            return False
    else:
        raise ValueError("Invalid name '%s'." % name)
def clear_descendants(self, source, clear_source=True):
    """Clear values and nodes calculated from `source`."""
    removed = self.cellgraph.clear_descendants(source, clear_source)
    for node in removed:
        del node[OBJ].data[node[KEY]]
def clear_obj(self, obj):
    """Clear values and nodes of `obj` and their dependants."""
    removed = self.cellgraph.clear_obj(obj)
    for node in removed:
        del node[OBJ].data[node[KEY]]
def get_object(self, name):
    """Retrieve an object by a dotted name relative to the model."""
    parts = name.split(".")
    space = self.spaces[parts.pop(0)]
    if parts:
        return space.get_object(".".join(parts))
    else:
        return space
def restore_state(self, system):
    """Called after unpickling to restore some attributes manually."""
    Impl.restore_state(self, system)
    BaseSpaceContainerImpl.restore_state(self, system)

    mapping = {}
    for node in self.cellgraph:
        if isinstance(node, tuple):
            name, key = node
        else:
            name, key = node, None
        cells = self.get_object(name)
        mapping[node] = get_node(cells, key, None)

    self.cellgraph = nx.relabel_nodes(self.cellgraph, mapping)
def get_dynamic_base(self, bases: tuple):
    """Create or get a base space for a tuple of bases."""
    try:
        return self._dynamic_bases_inverse[bases]
    except KeyError:
        name = self._dynamic_base_namer.get_next(self._dynamic_bases)
        base = self._new_space(name=name)
        self.spacegraph.add_space(base)
        self._dynamic_bases[name] = base
        self._dynamic_bases_inverse[bases] = base
        base.add_bases(bases)
        return base
def check_mro(self, bases):
    """Check if C3 MRO is possible with given bases."""
    try:
        self.add_node("temp")
        for base in bases:
            nx.DiGraph.add_edge(self, base, "temp")
        result = self.get_mro("temp")[1:]
    finally:
        self.remove_node("temp")

    return result
def get_command_names():
    """ Returns a list of command names supported """
    ret = []
    for f in os.listdir(COMMAND_MODULE_PATH):
        if os.path.isfile(os.path.join(COMMAND_MODULE_PATH, f)) \
                and f.endswith(COMMAND_MODULE_SUFFIX):
            ret.append(f[:-len(COMMAND_MODULE_SUFFIX)])
    return ret
def get(vals, key, default_val=None):
    """ Returns a value from a nested dictionary, addressed by a dotted `key` path """
    val = vals
    for part in key.split('.'):
        if isinstance(val, dict):
            val = val.get(part, None)
            if val is None:
                return default_val
        else:
            return default_val
    return val
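A quick usage sketch for the dotted-key lookup above; the `config` dictionary here is invented for illustration:

# Hypothetical config dict; only the `get` helper above is assumed.
config = {'app': {'app_name': 'myapp', 'environments': {'prod': {}}}}
assert get(config, 'app.app_name') == 'myapp'
assert get(config, 'app.environments.prod') == {}
assert get(config, 'app.missing.key', 'fallback') == 'fallback'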
def parse_option_settings(option_settings):
    """ Parses option_settings as they are defined in the configuration file """
    ret = []
    for namespace, params in list(option_settings.items()):
        for key, value in list(params.items()):
            ret.append((namespace, key, value))
    return ret
def parse_env_config(config, env_name):
    """ Parses an environment config """
    all_env = get(config, 'app.all_environments', {})
    env = get(config, 'app.environments.' + str(env_name), {})
    return merge_dict(all_env, env)
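A hedged example of how the shared `all_environments` block merges with one environment's config; the dict below is invented, and `merge_dict` from the same module is assumed:

config = {
    'app': {
        'all_environments': {'option_settings': {'aws:autoscaling:asg': {'MinSize': 1}}},
        'environments': {'prod': {'cname_prefix': 'myapp-prod'}},
    },
}
env = parse_env_config(config, 'prod')
# env now carries both the shared option_settings and prod's cname_prefix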
def create_archive(directory, filename, config={}, ignore_predicate=None,
                   ignored_files=['.git', '.svn']):
    """ Creates an archive from a directory and returns the file that was created. """
    with zipfile.ZipFile(filename, 'w', compression=zipfile.ZIP_DEFLATED) as zip_file:
        root_len = len(os.path.abspath(directory))

        # create it
        out("Creating archive: " + str(filename))
        for root, dirs, files in os.walk(directory, followlinks=True):
            archive_root = os.path.abspath(root)[root_len + 1:]
            for f in files:
                fullpath = os.path.join(root, f)
                archive_name = os.path.join(archive_root, f)

                # ignore the file we're creating
                if filename in fullpath:
                    continue

                # ignored files (a bare `continue` here would only skip the
                # inner loop, so use a flag to actually skip the file)
                if ignored_files is not None:
                    skip = False
                    for name in ignored_files:
                        if fullpath.endswith(name):
                            out("Skipping: " + str(name))
                            skip = True
                            break
                    if skip:
                        continue

                # do predicate
                if ignore_predicate is not None:
                    if not ignore_predicate(archive_name):
                        out("Skipping: " + str(archive_name))
                        continue

                out("Adding: " + str(archive_name))
                zip_file.write(fullpath, archive_name, zipfile.ZIP_DEFLATED)

    return filename
def add_config_files_to_archive(directory, filename, config={}):
    """ Adds configuration files to an existing archive """
    with zipfile.ZipFile(filename, 'a') as zip_file:
        for entry in config:
            # renamed from `conf` to avoid shadowing the loop variable
            for path, tree in list(entry.items()):
                if 'yaml' in tree:
                    content = yaml.dump(tree['yaml'], default_flow_style=False)
                else:
                    content = tree.get('content', '')
                out("Adding file " + str(path) + " to archive " + str(filename))
                file_entry = zipfile.ZipInfo(path)
                file_entry.external_attr = tree.get('permissions', 0o644) << 16
                zip_file.writestr(file_entry, content)
    return filename
def swap_environment_cnames(self, from_env_name, to_env_name):
    """ Swaps cnames for an environment """
    self.ebs.swap_environment_cnames(source_environment_name=from_env_name,
                                     destination_environment_name=to_env_name)
def upload_archive(self, filename, key, auto_create_bucket=True):
    """ Uploads an application archive version to s3 """
    try:
        bucket = self.s3.get_bucket(self.aws.bucket)
        if ((self.aws.region != 'us-east-1' and self.aws.region != 'eu-west-1')
                and bucket.get_location() != self.aws.region) \
                or (self.aws.region == 'us-east-1' and bucket.get_location() != '') \
                or (self.aws.region == 'eu-west-1' and bucket.get_location() != 'eu-west-1'):
            raise Exception("Existing bucket doesn't match region")
    except S3ResponseError:
        bucket = self.s3.create_bucket(self.aws.bucket, location=self.aws.region)

    def __report_upload_progress(sent, total):
        if not sent:
            sent = 0
        if not total:
            total = 0
        # max(1, total) guards against division by zero when total is unknown
        out("Uploaded " + str(sent) + " bytes of " + str(total)
            + " (" + str(int(float(max(1, sent)) / float(max(1, total)) * 100)) + "%)")

    # upload the new version
    k = Key(bucket)
    k.key = self.aws.bucket_path + key
    k.set_metadata('time', str(time()))
    k.set_contents_from_filename(filename, cb=__report_upload_progress, num_cb=10)
def create_application(self, description=None):
    """ Creates an application and sets the helper's current app_name to the created application """
    out("Creating application " + str(self.app_name))
    self.ebs.create_application(self.app_name, description=description)
def delete_application(self):
    """ Deletes the application, forcibly terminating its environments """
    out("Deleting application " + str(self.app_name))
    self.ebs.delete_application(self.app_name, terminate_env_by_force=True)
def application_exists(self):
    """ Returns whether or not the given app_name exists """
    response = self.ebs.describe_applications(application_names=[self.app_name])
    return len(response['DescribeApplicationsResponse']['DescribeApplicationsResult']['Applications']) > 0
def create_environment(self, env_name, version_label=None,
                       solution_stack_name=None, cname_prefix=None,
                       description=None, option_settings=None,
                       tier_name='WebServer', tier_type='Standard',
                       tier_version='1.1'):
    """ Creates a new environment """
    out("Creating environment: " + str(env_name) + ", tier_name:" + str(tier_name) + ", tier_type:" + str(tier_type))
    self.ebs.create_environment(self.app_name, env_name,
                                version_label=version_label,
                                solution_stack_name=solution_stack_name,
                                cname_prefix=cname_prefix,
                                description=description,
                                option_settings=option_settings,
                                tier_type=tier_type,
                                tier_name=tier_name,
                                tier_version=tier_version)
def environment_exists(self, env_name):
    """ Returns whether or not the given environment exists """
    response = self.ebs.describe_environments(application_name=self.app_name,
                                              environment_names=[env_name],
                                              include_deleted=False)
    return len(response['DescribeEnvironmentsResponse']['DescribeEnvironmentsResult']['Environments']) > 0 \
        and response['DescribeEnvironmentsResponse']['DescribeEnvironmentsResult']['Environments'][0]['Status'] != 'Terminated'
def rebuild_environment(self, env_name):
    """ Rebuilds an environment """
    out("Rebuilding " + str(env_name))
    self.ebs.rebuild_environment(environment_name=env_name)
def get_environments(self):
    """ Returns the environments """
    response = self.ebs.describe_environments(application_name=self.app_name,
                                              include_deleted=False)
    return response['DescribeEnvironmentsResponse']['DescribeEnvironmentsResult']['Environments']
def delete_environment(self, environment_name):
    """ Deletes an environment """
    self.ebs.terminate_environment(environment_name=environment_name,
                                   terminate_resources=True)
def update_environment(self, environment_name, description=None,
                       option_settings=[], tier_type=None, tier_name=None,
                       tier_version='1.0'):
    """ Updates an environment """
    out("Updating environment: " + str(environment_name))
    messages = self.ebs.validate_configuration_settings(self.app_name, option_settings,
                                                        environment_name=environment_name)
    messages = messages['ValidateConfigurationSettingsResponse']['ValidateConfigurationSettingsResult']['Messages']
    ok = True
    for message in messages:
        if message['Severity'] == 'error':
            ok = False
        out("[" + message['Severity'] + "] " + str(environment_name) + " - '"
            + message['Namespace'] + ":" + message['OptionName'] + "': " + message['Message'])
    self.ebs.update_environment(
        environment_name=environment_name,
        description=description,
        option_settings=option_settings,
        tier_type=tier_type,
        tier_name=tier_name,
        tier_version=tier_version)
def environment_name_for_cname(self, env_cname):
    """ Returns an environment name for the given cname """
    envs = self.get_environments()
    for env in envs:
        if env['Status'] != 'Terminated' \
                and 'CNAME' in env \
                and env['CNAME'] \
                and env['CNAME'].lower().startswith(env_cname.lower() + '.'):
            return env['EnvironmentName']
    return None
def deploy_version(self, environment_name, version_label):
    """ Deploys a version to an environment """
    out("Deploying " + str(version_label) + " to " + str(environment_name))
    self.ebs.update_environment(environment_name=environment_name,
                                version_label=version_label)
def get_versions(self):
    """ Returns the versions available """
    response = self.ebs.describe_application_versions(application_name=self.app_name)
    return response['DescribeApplicationVersionsResponse']['DescribeApplicationVersionsResult']['ApplicationVersions']
def create_application_version(self, version_label, key):
    """ Creates an application version """
    out("Creating application version " + str(version_label) + " for " + str(key))
    self.ebs.create_application_version(self.app_name, version_label,
                                        s3_bucket=self.aws.bucket,
                                        s3_key=self.aws.bucket_path + key)
def delete_unused_versions(self, versions_to_keep=10):
    """ Deletes unused versions """

    # get versions in use
    environments = self.ebs.describe_environments(application_name=self.app_name,
                                                  include_deleted=False)
    environments = environments['DescribeEnvironmentsResponse']['DescribeEnvironmentsResult']['Environments']
    versions_in_use = []
    for env in environments:
        versions_in_use.append(env['VersionLabel'])

    # get all versions, newest first
    versions = self.ebs.describe_application_versions(application_name=self.app_name)
    versions = versions['DescribeApplicationVersionsResponse']['DescribeApplicationVersionsResult']['ApplicationVersions']
    versions = sorted(versions, reverse=True,
                      key=functools.cmp_to_key(
                          lambda x, y: (x['DateCreated'] > y['DateCreated']) - (x['DateCreated'] < y['DateCreated'])))

    # delete versions beyond the keep count, unless they are in use
    for version in versions[versions_to_keep:]:
        if version['VersionLabel'] in versions_in_use:
            out("Not deleting " + version["VersionLabel"] + " because it is in use")
        else:
            out("Deleting unused version: " + version["VersionLabel"])
            self.ebs.delete_application_version(application_name=self.app_name,
                                                version_label=version['VersionLabel'])
            sleep(2)
def describe_events(self, environment_name, next_token=None, start_time=None):
    """ Describes events from the given environment """
    events = self.ebs.describe_events(
        application_name=self.app_name,
        environment_name=environment_name,
        next_token=next_token,
        start_time=start_time + 'Z')
    return (events['DescribeEventsResponse']['DescribeEventsResult']['Events'],
            events['DescribeEventsResponse']['DescribeEventsResult']['NextToken'])
def wait_for_environments(self, environment_names, health=None, status=None,
                          version_label=None, include_deleted=True,
                          use_events=True):
    """
    Waits for an environment to have the given version_label
    and to be in the green state
    """

    # turn into a list
    if not isinstance(environment_names, (list, tuple)):
        environment_names = [environment_names]
    environment_names = environment_names[:]

    # print some stuff
    s = "Waiting for environment(s) " + (", ".join(environment_names)) + " to"
    if health is not None:
        s += " have health " + health
    else:
        s += " have any health"
    if version_label is not None:
        s += " and have version " + version_label
    if status is not None:
        s += " and have status " + status
    out(s)

    started = time()
    seen_events = list()
    # Red-health samples per environment; tracked here because the `env`
    # dicts are re-fetched (and so reset) on every pass of the loop.
    red_counts = {}

    for env_name in environment_names:
        (events, next_token) = self.describe_events(env_name, start_time=datetime.now().isoformat())
        for event in events:
            seen_events.append(event)

    while True:
        # bail if they're all good
        if len(environment_names) == 0:
            break

        # wait
        sleep(10)

        # get the environments
        environments = self.ebs.describe_environments(
            application_name=self.app_name,
            environment_names=environment_names,
            include_deleted=include_deleted)
        environments = environments['DescribeEnvironmentsResponse']['DescribeEnvironmentsResult']['Environments']
        if len(environments) <= 0:
            raise Exception("Couldn't find any environments")

        # loop through and wait
        for env in environments[:]:
            env_name = env['EnvironmentName']

            # the message
            msg = "Environment " + env_name + " is " + str(env['Health'])
            if version_label is not None:
                msg = msg + " and has version " + str(env['VersionLabel'])
            if status is not None:
                msg = msg + " and has status " + str(env['Status'])

            # what we're waiting for
            good_to_go = True
            if health is not None:
                good_to_go = good_to_go and str(env['Health']) == health
            if status is not None:
                good_to_go = good_to_go and str(env['Status']) == status
            if version_label is not None:
                good_to_go = good_to_go and str(env['VersionLabel']) == version_label

            # allow a certain number of Red samples before failing
            if env['Status'] == 'Ready' and env['Health'] == 'Red':
                red_counts[env_name] = red_counts.get(env_name, 0) + 1
                if red_counts[env_name] > MAX_RED_SAMPLES:
                    out('Deploy failed')
                    raise Exception('Ready and red')

            # log it
            if good_to_go:
                out(msg + " ... done")
                environment_names.remove(env_name)
            else:
                out(msg + " ... waiting")

            # log events
            (events, next_token) = self.describe_events(env_name, start_time=datetime.now().isoformat())
            for event in events:
                if event not in seen_events:
                    out("[" + event['Severity'] + "] " + event['Message'])
                    seen_events.append(event)

        # check the time
        elapsed = time() - started
        if elapsed > self.wait_time_secs:
            message = "Wait time for environment(s) {environments} to be {health} expired".format(
                environments=" and ".join(environment_names),
                health=(health or "Green")
            )
            raise Exception(message)
def add_arguments(parser):
    """ adds arguments for the swap urls command """
    parser.add_argument('-o', '--old-environment', help='Old environment name', required=True)
    parser.add_argument('-n', '--new-environment', help='New environment name', required=True)
def execute(helper, config, args):
    """ Swaps old and new URLs. If old_environment was active, new_environment will become the active environment """
    old_env_name = args.old_environment
    new_env_name = args.new_environment

    # swap C-Names
    out("Assuming that {} is the currently active environment...".format(old_env_name))
    out("Swapping environment cnames: {} will become active, {} will become inactive.".format(new_env_name, old_env_name))
    helper.swap_environment_cnames(old_env_name, new_env_name)
    helper.wait_for_environments([old_env_name, new_env_name], status='Ready', include_deleted=False)
def execute(helper, config, args):
    """ Dumps the parsed option settings for an environment """
    env = parse_env_config(config, args.environment)
    option_settings = env.get('option_settings', {})
    settings = parse_option_settings(option_settings)
    for setting in settings:
        out(str(setting))
def cached_property(f):
    """Similar to `@property` but it calls the function just once and
    caches the result.  The object has to have a ``__cache__`` attribute.

    If you define ``__slots__`` for optimization, the metaclass should be
    a :class:`CacheMeta`.
    """
    @property
    @functools.wraps(f)
    def wrapped(self, name=f.__name__):
        try:
            cache = self.__cache__
        except AttributeError:
            self.__cache__ = cache = {}
        try:
            return cache[name]
        except KeyError:
            cache[name] = rv = f(self)
            return rv
    return wrapped
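A minimal usage sketch for the decorator above; the `Circle` class is invented, and `functools` is assumed imported where `cached_property` is defined:

class Circle(object):
    def __init__(self, radius):
        self.radius = radius

    @cached_property
    def area(self):
        print('computing area...')  # runs only on the first access
        return 3.14159 * self.radius ** 2

c = Circle(2.0)
c.area  # prints 'computing area...' and stores the result in c.__cache__
c.area  # served from the cache; no recomputation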
def execute(helper, config, args):
    """ Lists environments """
    envs = config.get('app', {}).get('environments', [])
    out("Parsed environments:")
    for name, conf in list(envs.items()):
        out('\t' + name)

    envs = helper.get_environments()
    out("Deployed environments:")
    for env in envs:
        if env['Status'] != 'Terminated':
            out('\t' + str(env['EnvironmentName']) + ' (' + str(env['Status']) + ', ' + str(env['CNAME']) + ')')
def execute(helper, config, args):
    """ The init command """

    # check to see if the application exists
    if not helper.application_exists():
        helper.create_application(get(config, 'app.description'))
    else:
        out("Application " + get(config, 'app.app_name') + " exists")

    # create environments
    environment_names = []
    environments_to_wait_for_green = []
    for env_name, env_config in list(get(config, 'app.environments').items()):
        environment_names.append(env_name)
        env_config = parse_env_config(config, env_name)
        if not helper.environment_exists(env_name):
            option_settings = parse_option_settings(env_config.get('option_settings', {}))
            helper.create_environment(env_name,
                                      solution_stack_name=env_config.get('solution_stack_name'),
                                      cname_prefix=env_config.get('cname_prefix', None),
                                      description=env_config.get('description', None),
                                      option_settings=option_settings,
                                      tier_name=env_config.get('tier_name'),
                                      tier_type=env_config.get('tier_type'),
                                      tier_version=env_config.get('tier_version'),
                                      version_label=args.version_label)
            environments_to_wait_for_green.append(env_name)
        else:
            out("Environment " + env_name)

    # get the environments
    environments_to_wait_for_term = []
    if args.delete:
        environments = helper.get_environments()
        for env in environments:
            if env['EnvironmentName'] not in environment_names:
                if env['Status'] != 'Ready':
                    out("Unable to delete " + env['EnvironmentName']
                        + " because it's not in status Ready (" + env['Status'] + ")")
                else:
                    out("Deleting environment: " + env['EnvironmentName'])
                    helper.delete_environment(env['EnvironmentName'])
                    environments_to_wait_for_term.append(env['EnvironmentName'])

    # wait
    if not args.dont_wait and len(environments_to_wait_for_green) > 0:
        helper.wait_for_environments(environments_to_wait_for_green, status='Ready', include_deleted=False)
    if not args.dont_wait and len(environments_to_wait_for_term) > 0:
        helper.wait_for_environments(environments_to_wait_for_term, status='Terminated', include_deleted=False)

    out("Application initialized")
    return 0
def execute(helper, config, args):
    """ Deletes the application """
    helper.delete_application()

    # wait
    if not args.dont_wait:
        # get environments
        environment_names = []
        for env in helper.get_environments():
            environment_names.append(env['EnvironmentName'])

        # wait for them
        helper.wait_for_environments(environment_names, status='Terminated')

    return 0
def execute(helper, config, args):
    """ Deletes an environment """
    env_config = parse_env_config(config, args.environment)

    environments_to_wait_for_term = []
    environments = helper.get_environments()
    for env in environments:
        if env['EnvironmentName'] == args.environment:
            if env['Status'] != 'Ready':
                out("Unable to delete " + env['EnvironmentName']
                    + " because it's not in status Ready (" + env['Status'] + ")")
            else:
                out("Deleting environment: " + env['EnvironmentName'])
                helper.delete_environment(env['EnvironmentName'])
                environments_to_wait_for_term.append(env['EnvironmentName'])

    if not args.dont_wait:
        helper.wait_for_environments(environments_to_wait_for_term, status='Terminated', include_deleted=True)
    out("Environment deleted")
    return 0
def execute(helper, config, args):
    """ Deploys to an environment """
    version_label = args.version_label
    env_config = parse_env_config(config, args.environment)
    env_name = args.environment

    # upload or build an archive
    version_label = upload_application_archive(
        helper, env_config, archive=args.archive,
        directory=args.directory, version_label=version_label)

    import datetime
    start_time = datetime.datetime.utcnow().isoformat() + 'Z'

    # deploy it
    helper.deploy_version(env_name, version_label)

    # wait
    if not args.dont_wait:
        helper.wait_for_environments(env_name, status='Ready',
                                     version_label=version_label,
                                     include_deleted=False)

    # update it
    env = parse_env_config(config, env_name)
    option_settings = parse_option_settings(env.get('option_settings', {}))
    helper.update_environment(env_name,
                              description=env.get('description', None),
                              option_settings=option_settings,
                              tier_type=env.get('tier_type'),
                              tier_name=env.get('tier_name'),
                              tier_version=env.get('tier_version'))

    # wait
    if not args.dont_wait:
        helper.wait_for_environments(env_name, health='Green', status='Ready',
                                     version_label=version_label,
                                     include_deleted=False)

    events = helper.ebs.describe_events(start_time=start_time, environment_name=env_name)

    import json
    if args.log_events_to_file:
        with open('ebs_events.json', 'w+') as f:
            json.dump(events, f)

    # delete unused
    helper.delete_unused_versions(versions_to_keep=int(get(config, 'app.versions_to_keep', 10)))
def add_arguments(parser):
    """ adds arguments for the deploy command """
    parser.add_argument('-e', '--environment', help='Environment name', required=True)
    parser.add_argument('-w', '--dont-wait', help='Skip waiting for the init to finish', action='store_true')
    parser.add_argument('-l', '--version-label', help='Version label', required=False)
def execute(helper, config, args):
    """ Deploys to an environment """
    env_config = parse_env_config(config, args.environment)
    cname_prefix = env_config.get('cname_prefix', None)
    env_name = args.environment

    # change version
    if args.version_label:
        helper.deploy_version(env_name, args.version_label)
        if not args.dont_wait:
            helper.wait_for_environments(env_name, status='Ready', version_label=args.version_label)

    # update it
    env = parse_env_config(config, env_name)
    option_settings = parse_option_settings(env.get('option_settings', {}))
    helper.update_environment(env_name,
                              description=env.get('description', None),
                              option_settings=option_settings,
                              tier_type=env.get('tier_type'),
                              tier_name=env.get('tier_name'),
                              tier_version=env.get('tier_version'))

    # wait
    if not args.dont_wait:
        helper.wait_for_environments(env_name, health='Green', status='Ready', version_label=args.version_label)

    # delete unused
    helper.delete_unused_versions(versions_to_keep=int(get(config, 'app.versions_to_keep', 10)))
def join_phonemes(*args):
    """Joins a Hangul letter from Korean phonemes."""
    # Normalize arguments as onset, nucleus, coda.
    if len(args) == 1:
        # tuple of (onset, nucleus[, coda])
        args = args[0]
    if len(args) == 2:
        args += (CODAS[0],)
    try:
        onset, nucleus, coda = args
    except ValueError:
        raise TypeError('join_phonemes() takes at most 3 arguments')
    offset = (
        (ONSETS.index(onset) * NUM_NUCLEUSES + NUCLEUSES.index(nucleus)) *
        NUM_CODAS + CODAS.index(coda)
    )
    return unichr(FIRST_HANGUL_OFFSET + offset)
def split_phonemes(letter, onset=True, nucleus=True, coda=True):
    """Splits Korean phonemes, also known as "์ž์†Œ", from a Hangul letter.

    :returns: (onset, nucleus, coda)
    :raises ValueError: `letter` is not a single Hangul letter.
    """
    if len(letter) != 1 or not is_hangul(letter):
        raise ValueError('Not Hangul letter: %r' % letter)
    offset = ord(letter) - FIRST_HANGUL_OFFSET
    phonemes = [None] * 3
    if onset:
        phonemes[0] = ONSETS[offset // (NUM_NUCLEUSES * NUM_CODAS)]
    if nucleus:
        phonemes[1] = NUCLEUSES[(offset // NUM_CODAS) % NUM_NUCLEUSES]
    if coda:
        phonemes[2] = CODAS[offset % NUM_CODAS]
    return tuple(phonemes)
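A round-trip sketch for the two phoneme helpers above, assuming the ONSETS/NUCLEUSES/CODAS tables they reference are loaded (and that CODAS[0] is the empty coda):

# u'๊ฐ' decomposes into onset ใ„ฑ, nucleus ใ…, coda ใ„ฑ.
onset, nucleus, coda = split_phonemes(u'๊ฐ')
assert join_phonemes(onset, nucleus, coda) == u'๊ฐ'
# A two-phoneme call fills in the empty coda.
assert join_phonemes(u'ใ„ฑ', u'ใ…') == u'๊ฐ€'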
def combine_words(word1, word2):
    """Combines two words.  If the first word ends with a vowel and the
    initial letter of the second word is a sole consonant, it merges them
    into one letter::

       >>> combine_words(u'๋‹ค', u'ใ„บ')
       ๋‹ญ
       >>> combine_words(u'๊ฐ€์˜ค', u'ใ„ด๋ˆ„๋ฆฌ')
       ๊ฐ€์˜จ๋ˆ„๋ฆฌ

    """
    if word1 and word2 and is_consonant(word2[0]):
        onset, nucleus, coda = split_phonemes(word1[-1])
        if not coda:
            glue = join_phonemes(onset, nucleus, word2[0])
            return word1[:-1] + glue + word2[1:]
    return word1 + word2
def index_particles(particles):
    """Indexes :class:`Particle` objects.  It returns a regex pattern which
    matches any particle morph, and a dictionary that indexes the given
    particles by regex group.
    """
    patterns, indices = [], {}
    for x, p in enumerate(particles):
        group = u'_%d' % x
        indices[group] = x
        patterns.append(u'(?P<%s>%s)' % (group, p.regex_pattern()))
    pattern = re.compile(u'|'.join(patterns))
    return pattern, indices
def execute(helper, config, args):
    """ Waits for an environment to be healthy """
    helper.wait_for_environments(args.environment, health=args.health)
def execute(helper, config, args):
    """ Lists application versions """
    versions = helper.get_versions()
    out("Deployed versions:")
    for version in versions:
        out(version)
def add_arguments(parser):
    """ Args for the init command """
    parser.add_argument('-e', '--environment', help='Environment name', required=False, nargs='+')
    parser.add_argument('-w', '--dont-wait', help='Skip waiting for the app to be deleted', action='store_true')
def execute(helper, config, args):
    """ Updates environments """
    environments = []
    if args.environment:
        for env_name in args.environment:
            environments.append(env_name)
    else:
        for env_name, env_config in list(get(config, 'app.environments').items()):
            environments.append(env_name)

    wait_environments = []
    for env_name in environments:
        env = parse_env_config(config, env_name)
        option_settings = parse_option_settings(env.get('option_settings', {}))
        helper.update_environment(env_name,
                                  description=env.get('description', None),
                                  option_settings=option_settings,
                                  tier_type=env.get('tier_type'),
                                  tier_name=env.get('tier_name'),
                                  tier_version=env.get('tier_version'))
        wait_environments.append(env_name)

    # wait
    if not args.dont_wait:
        helper.wait_for_environments(wait_environments, health='Green', status='Ready')
def execute(helper, config, args):
    """ Describes recent events for an environment. """
    environment_name = args.environment
    (events, next_token) = helper.describe_events(environment_name, start_time=datetime.now().isoformat())

    # print the events
    for event in events:
        print(("[" + event['Severity'] + "] " + event['Message']))
def execute(helper, config, args):
    """ Rebuilds an environment """
    env_config = parse_env_config(config, args.environment)
    helper.rebuild_environment(args.environment)

    # wait
    if not args.dont_wait:
        helper.wait_for_environments(args.environment, health='Green', status='Ready')
def generate_tolerances(morph1, morph2):
    """Generates all reasonable tolerant particle morphs::

    >>> set(generate_tolerances(u'์ด', u'๊ฐ€'))
    set([u'์ด(๊ฐ€)', u'(์ด)๊ฐ€', u'๊ฐ€(์ด)', u'(๊ฐ€)์ด'])
    >>> set(generate_tolerances(u'์ด๋ฉด', u'๋ฉด'))
    set([u'(์ด)๋ฉด'])

    """
    if morph1 == morph2:
        # Tolerance not required.
        return
    if not (morph1 and morph2):
        # Null allomorph exists.
        yield u'(%s)' % (morph1 or morph2)
        return
    len1, len2 = len(morph1), len(morph2)
    if len1 != len2:
        longer, shorter = (morph1, morph2) if len1 > len2 else (morph2, morph1)
        if longer.endswith(shorter):
            # Longer morph ends with shorter morph.
            yield u'(%s)%s' % (longer[:-len(shorter)], shorter)
            return
    # Find common suffix between two morphs.
    for x, (let1, let2) in enumerate(zip(reversed(morph1), reversed(morph2))):
        if let1 != let2:
            break
    if x:
        # They share the common suffix.
        x1, x2 = len(morph1) - x, len(morph2) - x
        common_suffix = morph1[x1:]
        morph1, morph2 = morph1[:x1], morph2[:x2]
    else:
        # No similarity with each other.
        common_suffix = ''
    for morph1, morph2 in [(morph1, morph2), (morph2, morph1)]:
        yield u'%s(%s)%s' % (morph1, morph2, common_suffix)
        yield u'(%s)%s%s' % (morph1, morph2, common_suffix)
def parse_tolerance_style(style, registry=None):
    """Resolves a tolerance style of the given tolerant particle morph::

    >>> parse_tolerance_style(u'์€(๋Š”)')
    0
    >>> parse_tolerance_style(u'(์€)๋Š”')
    1
    >>> parse_tolerance_style(OPTIONAL_MORPH2_AND_MORPH1)
    3

    """
    if isinstance(style, integer_types):
        return style
    if registry is None:
        from . import registry
    particle = registry.parse(style)
    if len(particle.tolerances) != 4:
        raise ValueError('Set tolerance style by general allomorphic particle')
    return particle.tolerances.index(style)
def execute(helper, config, args):
    """ Lists solution stacks """
    out("Available solution stacks")
    for stack in helper.list_available_solution_stacks():
        out("    " + str(stack))
    return 0
def add_arguments(parser):
    """ adds arguments for the deploy command """
    parser.add_argument('-e', '--environment', help='Environment name', required=True)
    parser.add_argument('-w', '--dont-wait', help='Skip waiting', action='store_true')
    parser.add_argument('-a', '--archive', help='Archive file', required=False)
    parser.add_argument('-d', '--directory', help='Directory', required=False)
    parser.add_argument('-l', '--version-label', help='Version label', required=False)
    parser.add_argument('-t', '--termination-delay',
                        help='Delay termination of old environment by this number of seconds',
                        type=int, required=False)
def execute(helper, config, args):
    """ Deploys to an environment """
    version_label = args.version_label
    archive = args.archive

    # get the environment configuration
    env_config = parse_env_config(config, args.environment)
    option_settings = parse_option_settings(env_config.get('option_settings', {}))
    cname_prefix = env_config.get('cname_prefix', None)

    # no zdt for anything but web server
    tier_name = env_config.get('tier_name', 'WebServer')
    if tier_name != 'WebServer':
        raise Exception(
            "Only able to do zero downtime deployments for "
            "WebServer tiers, can't do them for %s" % (tier_name, ))

    # find an available environment name
    out("Determining new environment name...")
    new_env_name = None
    if not helper.environment_exists(args.environment):
        new_env_name = args.environment
    else:
        for i in range(10):
            temp_env_name = args.environment + '-' + str(i)
            if not helper.environment_exists(temp_env_name):
                new_env_name = temp_env_name
                break
    if new_env_name is None:
        raise Exception("Unable to determine new environment name")
    out("New environment name will be " + new_env_name)

    # find an available cname name
    out("Determining new environment cname...")
    new_env_cname = None
    for i in range(10):
        temp_cname = cname_prefix + '-' + str(i)
        if not helper.environment_name_for_cname(temp_cname):
            new_env_cname = temp_cname
            break
    if new_env_cname is None:
        raise Exception("Unable to determine new environment cname")
    out("New environment cname will be " + new_env_cname)

    # upload or build an archive
    version_label = upload_application_archive(
        helper, env_config, archive=args.archive,
        directory=args.directory, version_label=version_label)

    # create the new environment
    helper.create_environment(new_env_name,
                              solution_stack_name=env_config.get('solution_stack_name'),
                              cname_prefix=new_env_cname,
                              description=env_config.get('description', None),
                              option_settings=option_settings,
                              version_label=version_label,
                              tier_name=tier_name,
                              tier_type=env_config.get('tier_type'),
                              tier_version=env_config.get('tier_version'))
    helper.wait_for_environments(new_env_name, status='Ready', health='Green', include_deleted=False)

    # find existing environment name
    old_env_name = helper.environment_name_for_cname(cname_prefix)
    if old_env_name is None:
        raise Exception("Unable to find current environment with cname: " + cname_prefix)
    out("Current environment name is " + old_env_name)

    # swap C-Names
    out("Swapping environment cnames")
    helper.swap_environment_cnames(old_env_name, new_env_name)
    helper.wait_for_environments([old_env_name, new_env_name], status='Ready', include_deleted=False)

    # delete the old environment
    if args.termination_delay:
        out("Termination delay specified, sleeping for {} seconds...".format(args.termination_delay))
        time.sleep(args.termination_delay)
    out("Deleting old environment {}".format(old_env_name))
    helper.delete_environment(old_env_name)

    # delete unused
    helper.delete_unused_versions(versions_to_keep=int(get(config, 'app.versions_to_keep', 10)))
def filter_only_significant(word):
    """Returns the given word with insignificant letters at the end
    removed::

    >>> filter_only_significant(u'๋„ฅ์Šจ(์ฝ”๋ฆฌ์•„)')
    ๋„ฅ์Šจ
    >>> filter_only_significant(u'๋ฉ”์ดํ”Œ์Šคํ† ๋ฆฌ...')
    ๋ฉ”์ดํ”Œ์Šคํ† ๋ฆฌ

    """
    if not word:
        return word
    # Unwrap a complete parenthesis.
    if word.startswith(u'(') and word.endswith(u')'):
        return filter_only_significant(word[1:-1])
    x = len(word)
    while x > 0:
        x -= 1
        c = word[x]
        # Skip a complete parenthesis.
        if c == u')':
            m = INSIGNIFICANT_PARENTHESIS_PATTERN.search(word[:x + 1])
            if m is not None:
                x = m.start()
            continue
        # Skip unreadable characters such as punctuations.
        unicode_category = unicodedata.category(c)
        if not SIGNIFICANT_UNICODE_CATEGORY_PATTERN.match(unicode_category):
            continue
        break
    return word[:x + 1]
def pick_coda_from_letter(letter):
    """Picks only a coda from a Hangul letter.  It returns ``None`` if the
    given letter is not Hangul.
    """
    try:
        __, __, coda = \
            split_phonemes(letter, onset=False, nucleus=False, coda=True)
    except ValueError:
        return None
    else:
        return coda
def pick_coda_from_decimal(decimal):
    """Picks only a coda from a decimal."""
    decimal = Decimal(decimal)
    __, digits, exp = decimal.as_tuple()
    if exp < 0:
        return DIGIT_CODAS[digits[-1]]
    __, digits, exp = decimal.normalize().as_tuple()
    index = bisect_right(EXP_INDICES, exp) - 1
    if index < 0:
        return DIGIT_CODAS[digits[-1]]
    else:
        return EXP_CODAS[EXP_INDICES[index]]
def deposit_fetcher(record_uuid, data):
    """Fetch a deposit identifier.

    :param record_uuid: Record UUID.
    :param data: Record content.
    :returns: A :class:`invenio_pidstore.fetchers.FetchedPID` that contains
        data['_deposit']['id'] as pid_value.
    """
    return FetchedPID(
        provider=DepositProvider,
        pid_type=DepositProvider.pid_type,
        pid_value=str(data['_deposit']['id']),
    )
def deposit_minter(record_uuid, data):
    """Mint a deposit identifier.

    A PID with the following characteristics is created:

    .. code-block:: python

        {
            "object_type": "rec",
            "object_uuid": record_uuid,
            "pid_value": "<new-pid-value>",
            "pid_type": "depid",
        }

    The following deposit meta information is updated:

    .. code-block:: python

        deposit['_deposit'] = {
            "id": "<new-pid-value>",
            "status": "draft",
        }

    :param record_uuid: Record UUID.
    :param data: Record content.
    :returns: A :class:`invenio_pidstore.models.PersistentIdentifier`
        object.
    """
    provider = DepositProvider.create(
        object_type='rec',
        object_uuid=record_uuid,
        pid_value=uuid.uuid4().hex,
    )
    data['_deposit'] = {
        'id': provider.pid.pid_value,
        'status': 'draft',
    }
    return provider.pid
def admin_permission_factory():
    """Factory for creating a permission for an admin `deposit-admin-access`.

    If `invenio-access` module is installed, it returns a
    :class:`invenio_access.permissions.DynamicPermission` object.
    Otherwise, it returns a :class:`flask_principal.Permission` object.

    :returns: Permission instance.
    """
    try:
        pkg_resources.get_distribution('invenio-access')
        from invenio_access.permissions import DynamicPermission as Permission
    except pkg_resources.DistributionNotFound:
        from flask_principal import Permission
    return Permission(action_admin_access)
def create_blueprint(endpoints):
    """Create Invenio-Deposit-UI blueprint.

    See: :data:`invenio_deposit.config.DEPOSIT_RECORDS_UI_ENDPOINTS`.

    :param endpoints: List of endpoints configuration.
    :returns: The configured blueprint.
    """
    from invenio_records_ui.views import create_url_rule
    blueprint = Blueprint(
        'invenio_deposit_ui',
        __name__,
        static_folder='../static',
        template_folder='../templates',
        url_prefix='',
    )

    @blueprint.errorhandler(PIDDeletedError)
    def tombstone_errorhandler(error):
        """Render tombstone page."""
        return render_template(
            current_app.config['DEPOSIT_UI_TOMBSTONE_TEMPLATE'],
            pid=error.pid,
            record=error.record or {},
        ), 410

    for endpoint, options in (endpoints or {}).items():
        options = deepcopy(options)
        options.pop('jsonschema', None)
        options.pop('schemaform', None)
        blueprint.add_url_rule(**create_url_rule(endpoint, **options))

    @blueprint.route('/deposit')
    @login_required
    def index():
        """List user deposits."""
        return render_template(current_app.config['DEPOSIT_UI_INDEX_TEMPLATE'])

    @blueprint.route('/deposit/new')
    @login_required
    def new():
        """Create new deposit."""
        deposit_type = request.values.get('type')
        return render_template(
            current_app.config['DEPOSIT_UI_NEW_TEMPLATE'],
            record={'_deposit': {'id': None}},
            jsonschema=current_deposit.jsonschemas[deposit_type],
            schemaform=current_deposit.schemaforms[deposit_type],
        )

    return blueprint
def default_view_method(pid, record, template=None):
    """Default view method.

    Sends ``record_viewed`` signal and renders template.
    """
    record_viewed.send(
        current_app._get_current_object(),
        pid=pid,
        record=record,
    )
    deposit_type = request.values.get('type')
    return render_template(
        template,
        pid=pid,
        record=record,
        jsonschema=current_deposit.jsonschemas[deposit_type],
        schemaform=current_deposit.schemaforms[deposit_type],
    )
def create(cls, object_type=None, object_uuid=None, **kwargs):
    """Create a new deposit identifier.

    :param object_type: The object type. (Default: ``None``)
    :param object_uuid: The object UUID. (Default: ``None``)
    :param kwargs: It contains the pid value.
    """
    assert 'pid_value' in kwargs
    kwargs.setdefault('status', cls.default_status)
    return super(DepositProvider, cls).create(
        object_type=object_type, object_uuid=object_uuid, **kwargs)
def extract_actions_from_class(record_class):
    """Extract actions from class."""
    for name in dir(record_class):
        method = getattr(record_class, name, None)
        if method and getattr(method, '__deposit_action__', False):
            yield method.__name__
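A small sketch of the action-flag convention the generator above relies on; the class below is invented, and the `__deposit_action__` attribute is assumed to be what invenio-deposit's action-marking decorator sets:

class FakeDeposit(object):
    def publish(self):
        pass
    publish.__deposit_action__ = True  # flagged as a deposit action

    def helper(self):
        pass  # not flagged, so not reported

assert list(extract_actions_from_class(FakeDeposit)) == ['publish']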
def check_oauth2_scope(can_method, *myscopes):
    """Base permission factory that checks OAuth2 scope and can_method.

    :param can_method: Permission check function that accepts a record as
        input and returns a boolean.
    :param myscopes: List of scopes required to permit the access.
    :returns: A :class:`flask_principal.Permission` factory.
    """
    def check(record, *args, **kwargs):
        @require_api_auth()
        @require_oauth_scopes(*myscopes)
        def can(self):
            return can_method(record)

        return type('CheckOAuth2Scope', (), {'can': can})()

    return check
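A hedged wiring example for the factory above; the ownership check, record layout, and scope name are all invented for illustration:

from flask_login import current_user  # assumption: flask-login is in use

def _is_owner(record):
    # Hypothetical ownership convention; adapt to the real record schema.
    return current_user.get_id() in record.get('_deposit', {}).get('owners', [])

can_edit_deposit = check_oauth2_scope(_is_owner, 'deposit:write')
# records-rest style call site:
#     permission = can_edit_deposit(record)
#     permission.can()  # runs the OAuth2 scope checks, then _is_owner(record)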
def can_elasticsearch(record):
    """Check if a given record is indexed.

    :param record: A record object.
    :returns: If the record is indexed returns `True`, otherwise `False`.
    """
    search = request._methodview.search_class()
    search = search.get_record(str(record.id))
    return search.count() == 1
def create_error_handlers(blueprint):
    """Create error handlers on blueprint."""
    blueprint.errorhandler(PIDInvalidAction)(create_api_errorhandler(
        status=403, message='Invalid action'
    ))
    records_rest_error_handlers(blueprint)
def create_blueprint(endpoints):
    """Create Invenio-Deposit-REST blueprint.

    See: :data:`invenio_deposit.config.DEPOSIT_REST_ENDPOINTS`.

    :param endpoints: List of endpoints configuration.
    :returns: The configured blueprint.
    """
    blueprint = Blueprint(
        'invenio_deposit_rest',
        __name__,
        url_prefix='',
    )
    create_error_handlers(blueprint)

    for endpoint, options in (endpoints or {}).items():
        options = deepcopy(options)

        if 'files_serializers' in options:
            files_serializers = options.get('files_serializers')
            files_serializers = {mime: obj_or_import_string(func)
                                 for mime, func in files_serializers.items()}
            del options['files_serializers']
        else:
            files_serializers = {}

        if 'record_serializers' in options:
            serializers = options.get('record_serializers')
            serializers = {mime: obj_or_import_string(func)
                           for mime, func in serializers.items()}
        else:
            serializers = {}

        file_list_route = options.pop(
            'file_list_route',
            '{0}/files'.format(options['item_route'])
        )
        file_item_route = options.pop(
            'file_item_route',
            '{0}/files/<path:key>'.format(options['item_route'])
        )

        options.setdefault('search_class', DepositSearch)
        search_class = obj_or_import_string(options['search_class'])

        # records rest endpoints will use the deposit class as record class
        options.setdefault('record_class', Deposit)
        record_class = obj_or_import_string(options['record_class'])

        # backward compatibility for indexer class
        options.setdefault('indexer_class', None)

        for rule in records_rest_url_rules(endpoint, **options):
            blueprint.add_url_rule(**rule)

        search_class_kwargs = {}
        if options.get('search_index'):
            search_class_kwargs['index'] = options['search_index']
        if options.get('search_type'):
            search_class_kwargs['doc_type'] = options['search_type']

        ctx = dict(
            read_permission_factory=obj_or_import_string(
                options.get('read_permission_factory_imp')
            ),
            create_permission_factory=obj_or_import_string(
                options.get('create_permission_factory_imp')
            ),
            update_permission_factory=obj_or_import_string(
                options.get('update_permission_factory_imp')
            ),
            delete_permission_factory=obj_or_import_string(
                options.get('delete_permission_factory_imp')
            ),
            record_class=record_class,
            search_class=partial(search_class, **search_class_kwargs),
            default_media_type=options.get('default_media_type'),
        )

        deposit_actions = DepositActionResource.as_view(
            DepositActionResource.view_name.format(endpoint),
            serializers=serializers,
            pid_type=options['pid_type'],
            ctx=ctx,
        )
        blueprint.add_url_rule(
            '{0}/actions/<any({1}):action>'.format(
                options['item_route'],
                ','.join(extract_actions_from_class(record_class)),
            ),
            view_func=deposit_actions,
            methods=['POST'],
        )

        deposit_files = DepositFilesResource.as_view(
            DepositFilesResource.view_name.format(endpoint),
            serializers=files_serializers,
            pid_type=options['pid_type'],
            ctx=ctx,
        )
        blueprint.add_url_rule(
            file_list_route,
            view_func=deposit_files,
            methods=['GET', 'POST', 'PUT'],
        )

        deposit_file = DepositFileResource.as_view(
            DepositFileResource.view_name.format(endpoint),
            serializers=files_serializers,
            pid_type=options['pid_type'],
            ctx=ctx,
        )
        blueprint.add_url_rule(
            file_item_route,
            view_func=deposit_file,
            methods=['GET', 'PUT', 'DELETE'],
        )
    return blueprint
def post(self, pid, record, action):
    """Handle deposit action.

    After the action is executed, a
    :class:`invenio_deposit.signals.post_action` signal is sent.

    Permission required: `update_permission_factory`.

    :param pid: Pid object (from url).
    :param record: Record object resolved from the pid.
    :param action: The action to execute.
    """
    record = getattr(record, action)(pid=pid)
    db.session.commit()
    # Refresh the PID and record metadata
    db.session.refresh(pid)
    db.session.refresh(record.model)
    post_action.send(current_app._get_current_object(), action=action,
                     pid=pid, deposit=record)
    response = self.make_response(pid, record,
                                  202 if action == 'publish' else 201)
    endpoint = '.{0}_item'.format(pid.pid_type)
    location = url_for(endpoint, pid_value=pid.pid_value, _external=True)
    response.headers.extend(dict(Location=location))
    return response
def get(self, pid, record):
    """Get files.

    Permission required: `read_permission_factory`.

    :param pid: Pid object (from url).
    :param record: Record object resolved from the pid.
    :returns: The files.
    """
    return self.make_response(obj=record.files, pid=pid, record=record)
def post(self, pid, record):
    """Handle POST deposit files.

    Permission required: `update_permission_factory`.

    :param pid: Pid object (from url).
    :param record: Record object resolved from the pid.
    """
    # load the file
    uploaded_file = request.files['file']
    # file name
    key = secure_filename(
        request.form.get('name') or uploaded_file.filename
    )
    # check if a file with this name already exists
    if key in record.files:
        raise FileAlreadyExists()
    # add it to the deposit
    record.files[key] = uploaded_file.stream
    record.commit()
    db.session.commit()
    return self.make_response(
        obj=record.files[key].obj, pid=pid, record=record, status=201)
def put(self, pid, record):
    """Handle the sort of the files through the PUT deposit files.

    Expected input in body PUT:

    .. code-block:: javascript

        [
            {
                "id": 1
            },
            {
                "id": 2
            },
            ...
        ]

    Permission required: `update_permission_factory`.

    :param pid: Pid object (from url).
    :param record: Record object resolved from the pid.
    :returns: The files.
    """
    try:
        ids = [data['id'] for data in json.loads(
            request.data.decode('utf-8'))]
    except KeyError:
        raise WrongFile()
    record.files.sort_by(*ids)
    record.commit()
    db.session.commit()
    return self.make_response(obj=record.files, pid=pid, record=record)
def get(self, pid, record, key, version_id, **kwargs):
    """Get file.

    Permission required: `read_permission_factory`.

    :param pid: Pid object (from url).
    :param record: Record object resolved from the pid.
    :param key: Unique identifier for the file in the deposit.
    :param version_id: File version. Optional. If no version is provided,
        the last version is retrieved.
    :returns: The file content.
    """
    try:
        obj = record.files[str(key)].get_version(version_id=version_id)
        return self.make_response(
            obj=obj or abort(404), pid=pid, record=record)
    except KeyError:
        abort(404)
def put(self, pid, record, key):
    """Handle the file rename through the PUT deposit file.

    Permission required: `update_permission_factory`.

    :param pid: Pid object (from url).
    :param record: Record object resolved from the pid.
    :param key: Unique identifier for the file in the deposit.
    """
    try:
        data = json.loads(request.data.decode('utf-8'))
        new_key = data['filename']
    except KeyError:
        raise WrongFile()
    new_key_secure = secure_filename(new_key)
    if not new_key_secure or new_key != new_key_secure:
        raise WrongFile()
    try:
        obj = record.files.rename(str(key), new_key_secure)
    except KeyError:
        abort(404)
    record.commit()
    db.session.commit()
    return self.make_response(obj=obj, pid=pid, record=record)
def delete(self, pid, record, key):
    """Handle DELETE deposit file.

    Permission required: `update_permission_factory`.

    :param pid: Pid object (from url).
    :param record: Record object resolved from the pid.
    :param key: Unique identifier for the file in the deposit.
    """
    try:
        del record.files[str(key)]
        record.commit()
        db.session.commit()
        return make_response('', 204)
    except KeyError:
        abort(404, 'The specified object does not exist or has already '
                   'been deleted.')
def records():
    """Load records."""
    import pkg_resources
    from dojson.contrib.marc21 import marc21
    from dojson.contrib.marc21.utils import create_record, split_blob
    from flask_login import login_user, logout_user
    from invenio_accounts.models import User
    from invenio_deposit.api import Deposit

    users = User.query.all()

    # pkg resources the demodata
    data_path = pkg_resources.resource_filename(
        'invenio_records', 'data/marc21/bibliographic.xml'
    )
    with open(data_path) as source:
        with current_app.test_request_context():
            indexer = RecordIndexer()
            with db.session.begin_nested():
                for index, data in enumerate(split_blob(source.read()), start=1):
                    login_user(users[index % len(users)])
                    # do translate
                    record = marc21.do(create_record(data))
                    # create record
                    indexer.index(Deposit.create(record))
                    logout_user()
            db.session.commit()
def location():
    """Load default location."""
    d = current_app.config['DATADIR']
    with db.session.begin_nested():
        Location.query.delete()
        loc = Location(name='local', uri=d, default=True)
        db.session.add(loc)
    db.session.commit()
def jsonschemas(self):
    """Load deposit JSON schemas."""
    _jsonschemas = {
        k: v['jsonschema']
        for k, v in self.app.config['DEPOSIT_RECORDS_UI_ENDPOINTS'].items()
        if 'jsonschema' in v
    }
    return defaultdict(
        lambda: self.app.config['DEPOSIT_DEFAULT_JSONSCHEMA'],
        _jsonschemas
    )
def schemaforms(self):
    """Load deposit schema forms."""
    _schemaforms = {
        k: v['schemaform']
        for k, v in self.app.config['DEPOSIT_RECORDS_UI_ENDPOINTS'].items()
        if 'schemaform' in v
    }
    return defaultdict(
        lambda: self.app.config['DEPOSIT_DEFAULT_SCHEMAFORM'],
        _schemaforms
    )
def init_app(self, app):
    """Flask application initialization.

    Initialize the UI endpoints.  Connect all signals if
    `DEPOSIT_REGISTER_SIGNALS` is ``True``.

    :param app: An instance of :class:`flask.Flask`.
    """
    self.init_config(app)
    app.register_blueprint(ui.create_blueprint(
        app.config['DEPOSIT_RECORDS_UI_ENDPOINTS']
    ))
    app.extensions['invenio-deposit'] = _DepositState(app)
    if app.config['DEPOSIT_REGISTER_SIGNALS']:
        post_action.connect(index_deposit_after_publish, sender=app,
                            weak=False)
def init_app(self, app):
    """Flask application initialization.

    Initialize the REST endpoints.  Connect all signals if
    `DEPOSIT_REGISTER_SIGNALS` is True.

    :param app: An instance of :class:`flask.Flask`.
    """
    self.init_config(app)
    blueprint = rest.create_blueprint(
        app.config['DEPOSIT_REST_ENDPOINTS']
    )

    # FIXME: This is a temporary fix. This means that
    # invenio-records-rest's endpoint_prefixes cannot be used before
    # the first request or in other processes, ex: Celery tasks.
    @app.before_first_request
    def extend_default_endpoint_prefixes():
        """Extend redirects between PID types."""
        endpoint_prefixes = utils.build_default_endpoint_prefixes(
            dict(app.config['DEPOSIT_REST_ENDPOINTS'])
        )
        current_records_rest = app.extensions['invenio-records-rest']
        overlap = set(endpoint_prefixes.keys()) & set(
            current_records_rest.default_endpoint_prefixes
        )
        if overlap:
            raise RuntimeError(
                'Deposit wants to override endpoint prefixes {0}.'.format(
                    ', '.join(overlap)
                )
            )
        current_records_rest.default_endpoint_prefixes.update(
            endpoint_prefixes
        )

    app.register_blueprint(blueprint)
    app.extensions['invenio-deposit-rest'] = _DepositState(app)
    if app.config['DEPOSIT_REGISTER_SIGNALS']:
        post_action.connect(index_deposit_after_publish, sender=app,
                            weak=False)
def deposit_links_factory(pid):
    """Factory for record links generation.

    The dictionary is formed as:

    .. code-block:: python

        {
            'files': '/url/to/files',
            'publish': '/url/to/publish',
            'edit': '/url/to/edit',
            'discard': '/url/to/discard',
            ...
        }

    :param pid: The record PID object.
    :returns: A dictionary that contains all the links.
    """
    links = default_links_factory(pid)

    def _url(name, **kwargs):
        """URL builder."""
        endpoint = '.{0}_{1}'.format(
            current_records_rest.default_endpoint_prefixes[pid.pid_type],
            name,
        )
        return url_for(endpoint, pid_value=pid.pid_value, _external=True,
                       **kwargs)

    links['files'] = _url('files')

    ui_endpoint = current_app.config.get('DEPOSIT_UI_ENDPOINT')
    if ui_endpoint is not None:
        links['html'] = ui_endpoint.format(
            host=request.host,
            scheme=request.scheme,
            pid_value=pid.pid_value,
        )

    deposit_cls = Deposit
    if 'pid_value' in request.view_args:
        deposit_cls = request.view_args['pid_value'].data[1].__class__

    for action in extract_actions_from_class(deposit_cls):
        links[action] = _url('actions', action=action)
    return links
def process_minter(value):
    """Load minter from PIDStore registry based on given value.

    :param value: Name of the minter.
    :returns: The minter.
    """
    try:
        return current_pidstore.minters[value]
    except KeyError:
        raise click.BadParameter(
            'Unknown minter {0}. Please use one of {1}.'.format(
                value, ', '.join(current_pidstore.minters.keys())
            )
        )
def process_schema(value):
    """Load schema from JSONSchema registry based on given value.

    :param value: Schema path, relative to the directory where it was
        registered.
    :returns: The schema absolute path.
    """
    schemas = current_app.extensions['invenio-jsonschemas'].schemas
    try:
        return schemas[value]
    except KeyError:
        raise click.BadParameter(
            'Unknown schema {0}. Please use one of:\n {1}'.format(
                value, '\n'.join(schemas.keys())
            )
        )
def json_serializer(pid, data, *args):
    """Build a JSON Flask response using the given data.

    :param pid: The `invenio_pidstore.models.PersistentIdentifier` of the
        record.
    :param data: The record metadata.
    :returns: A Flask response with JSON data.
    :rtype: :py:class:`flask.Response`.
    """
    if data is not None:
        response = Response(
            json.dumps(data.dumps()),
            mimetype='application/json'
        )
    else:
        response = Response(mimetype='application/json')
    return response
def file_serializer(obj):
    """Serialize an object.

    :param obj: A :class:`invenio_files_rest.models.ObjectVersion` instance.
    :returns: A dictionary with the fields to serialize.
    """
    return {
        "id": str(obj.file_id),
        "filename": obj.key,
        "filesize": obj.file.size,
        "checksum": obj.file.checksum,
    }
def json_files_serializer(objs, status=None):
    """JSON Files Serializer.

    :param objs: A list of :class:`invenio_files_rest.models.ObjectVersion`
        instances.
    :param status: A HTTP Status. (Default: ``None``)
    :returns: A Flask response with JSON data.
    :rtype: :py:class:`flask.Response`.
    """
    files = [file_serializer(obj) for obj in objs]
    return make_response(json.dumps(files), status)
def json_file_response(obj=None, pid=None, record=None, status=None):
    """JSON Files/File serializer.

    :param obj: A :class:`invenio_files_rest.models.ObjectVersion` instance
        or a :class:`invenio_records_files.api.FilesIterator` if it's a list
        of files.
    :param pid: PID value. (not used)
    :param record: The record metadata. (not used)
    :param status: The HTTP status code.
    :returns: A Flask response with JSON data.
    :rtype: :py:class:`flask.Response`.
    """
    from invenio_records_files.api import FilesIterator
    if isinstance(obj, FilesIterator):
        return json_files_serializer(obj, status=status)
    else:
        return json_file_serializer(obj, status=status)
def index_deposit_after_publish(sender, action=None, pid=None, deposit=None):
    """Index the record after publishing.

    .. note:: If the deposit is not published, nothing is indexed.

    :param sender: Who sent the signal.
    :param action: Action executed by the sender. (Default: ``None``)
    :param pid: PID object. (Default: ``None``)
    :param deposit: Deposit object. (Default: ``None``)
    """
    if action == 'publish':
        _, record = deposit.fetch_published()
        index_record.delay(str(record.id))
def index(method=None, delete=False):
    """Decorator to update index.

    :param method: Function wrapped. (Default: ``None``)
    :param delete: If ``True``, delete the indexed record.
        (Default: ``False``)
    """
    if method is None:
        return partial(index, delete=delete)

    @wraps(method)
    def wrapper(self_or_cls, *args, **kwargs):
        """Send record for indexing."""
        result = method(self_or_cls, *args, **kwargs)
        try:
            if delete:
                self_or_cls.indexer.delete(result)
            else:
                self_or_cls.indexer.index(result)
        except RequestError:
            current_app.logger.exception('Could not index {0}.'.format(result))
        return result
    return wrapper
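A sketch of how the decorator above could be applied; the class and indexer wiring below are invented:

from invenio_indexer.api import RecordIndexer  # assumption: invenio-indexer is in use

class MyRecord(object):
    indexer = RecordIndexer()

    @index                 # re-index the returned record
    def publish(self):
        ...                # mutate the record, then hand it back
        return self

    @index(delete=True)    # drop the returned record from the index
    def discard(self):
        ...
        return self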
def has_status(method=None, status='draft'):
    """Check that deposit has a defined status (default: draft).

    :param method: Function executed if record has a defined status.
        (Default: ``None``)
    :param status: Defined status to check. (Default: ``'draft'``)
    """
    if method is None:
        return partial(has_status, status=status)

    @wraps(method)
    def wrapper(self, *args, **kwargs):
        """Check current deposit status."""
        if status != self.status:
            raise PIDInvalidAction()
        return method(self, *args, **kwargs)
    return wrapper
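A companion sketch for `has_status`: guarding actions by deposit status. The class below is invented and hard-codes `status` for illustration:

class MyDeposit(object):
    status = 'draft'

    @has_status                       # defaults to status='draft'
    def publish(self, *args, **kwargs):
        return 'published'

    @has_status(status='published')   # only allowed on published deposits
    def edit(self, *args, **kwargs):
        return 'back to draft'

MyDeposit().publish()   # runs: status matches 'draft'
MyDeposit().edit()      # raises PIDInvalidAction: status is still 'draft'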