def delete_roles_request(request):
"""Submission to remove a role acceptance request."""
uuid_ = request.matchdict['uuid']
posted_roles = request.json
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
remove_role_requests(cursor, uuid_, posted_roles)
resp = request.response
resp.status_int = 200
return resp

def get_acl(request):
"""Returns the ACL for the given content identified by ``uuid``."""
uuid_ = request.matchdict['uuid']
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""\
SELECT TRUE FROM document_controls WHERE uuid = %s""", (uuid_,))
try:
# Check that it exists
cursor.fetchone()[0]
except TypeError:
raise httpexceptions.HTTPNotFound()
cursor.execute("""\
SELECT row_to_json(combined_rows) FROM (
SELECT uuid, user_id AS uid, permission
FROM document_acl AS acl
WHERE uuid = %s
ORDER BY user_id ASC, permission ASC
) as combined_rows""", (uuid_,))
acl = [r[0] for r in cursor.fetchall()]
return acl

def post_acl_request(request):
"""Submission to create an ACL."""
uuid_ = request.matchdict['uuid']
posted = request.json
permissions = [(x['uid'], x['permission'],) for x in posted]
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""\
SELECT TRUE FROM document_controls WHERE uuid = %s::UUID""", (uuid_,))
try:
# Check that it exists
cursor.fetchone()[0]
except TypeError:
if request.has_permission('publish.create-identifier'):
cursor.execute("""\
INSERT INTO document_controls (uuid) VALUES (%s)""", (uuid_,))
else:
raise httpexceptions.HTTPNotFound()
upsert_acl(cursor, uuid_, permissions)
resp = request.response
resp.status_int = 202
return resp

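# Request-payload sketch for the ACL views here (uids and values are
# hypothetical): the posted JSON is a list of objects, each carrying a
# ``uid`` and a ``permission``, matching the tuple unpacking above.
#   [{"uid": "user1", "permission": "publish"},
#    {"uid": "user2", "permission": "publish"}]
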
def delete_acl_request(request):
"""Submission to remove an ACL."""
uuid_ = request.matchdict['uuid']
posted = request.json
permissions = [(x['uid'], x['permission'],) for x in posted]
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
remove_acl(cursor, uuid_, permissions)
resp = request.response
resp.status_int = 200
return resp

def processor(): # pragma: no cover
"""Churns over PostgreSQL notifications on configured channels.
This requires the application to be set up and the registry to be
available. This function uses the database connection string and a list
of preconfigured channels.
"""
registry = get_current_registry()
settings = registry.settings
connection_string = settings[CONNECTION_STRING]
channels = _get_channels(settings)
# Code adapted from
# http://initd.org/psycopg/docs/advanced.html#asynchronous-notifications
with psycopg2.connect(connection_string) as conn:
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
with conn.cursor() as cursor:
for channel in channels:
cursor.execute('LISTEN {}'.format(channel))
logger.debug('Waiting for notifications on channel "{}"'
.format(channel))
registry.notify(ChannelProcessingStartUpEvent())
rlist = [conn] # wait until ready for reading
wlist = [] # wait until ready for writing
xlist = [] # wait for an "exceptional condition"
timeout = 5
while True:
if select.select(rlist, wlist, xlist, timeout) != ([], [], []):
conn.poll()
while conn.notifies:
notif = conn.notifies.pop(0)
logger.debug('Got NOTIFY: pid={} channel={} payload={}'
.format(notif.pid, notif.channel,
notif.payload))
event = create_pg_notify_event(notif)
try:
registry.notify(event)
except Exception:
logger.exception('Logging an uncaught exception')

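# A minimal sketch for exercising the loop above from a second connection.
# The channel name must match one of the configured channels; pg_notify()
# is used because the NOTIFY utility statement cannot take bound parameters.
import psycopg2


def send_test_notification(connection_string, channel, payload='{}'):
    """Emit a NOTIFY that the processor() loop will pick up."""
    conn = psycopg2.connect(connection_string)
    try:
        with conn:  # commit on exit so the notification is delivered
            with conn.cursor() as cursor:
                cursor.execute("SELECT pg_notify(%s, %s)", (channel, payload))
    finally:
        conn.close()
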
def lookup_api_key_info():
"""Given a dbapi cursor, lookup all the api keys and their information."""
info = {}
with db_connect() as conn:
with conn.cursor() as cursor:
cursor.execute(ALL_KEY_INFO_SQL_STMT)
for row in cursor.fetchall():
id, key, name, groups = row
user_id = "api_key:{}".format(id)
info[key] = dict(id=id, user_id=user_id,
name=name, groups=groups)
return info

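# Shape sketch of the mapping returned above (values hypothetical):
#   {'<api-key>': {'id': 1,
#                  'user_id': 'api_key:1',
#                  'name': 'example-service',
#                  'groups': ['g:publishers']}}
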
def includeme(config):
"""Configuration include fuction for this module"""
api_key_authn_policy = APIKeyAuthenticationPolicy()
config.include('openstax_accounts')
openstax_authn_policy = config.registry.getUtility(
IOpenstaxAccountsAuthenticationPolicy)
# Set up api & user authentication policies.
policies = [api_key_authn_policy, openstax_authn_policy]
authn_policy = MultiAuthenticationPolicy(policies)
config.set_authentication_policy(authn_policy)
# Set up the authorization policy.
authz_policy = ACLAuthorizationPolicy()
config.set_authorization_policy(authz_policy)

def expandvars_dict(settings):
"""Expands all environment variables in a settings dictionary."""
return dict(
(key, os.path.expandvars(value))
for key, value in settings.items()
)

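# Usage sketch (the environment variable and value are hypothetical):
#   >>> os.environ['DB_URL'] = 'postgresql://localhost/repo'
#   >>> expandvars_dict({'db': '${DB_URL}'})
#   {'db': 'postgresql://localhost/repo'}
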
def initialize_sentry_integration(): # pragma: no cover
"""\
Used to optionally initialize the Sentry service with this app.
See https://docs.sentry.io/platforms/python/pyramid/
"""
# This function is not under coverage because it is boilerplate
# from the Sentry documentation.
try:
import sentry_sdk
from sentry_sdk.integrations.pyramid import PyramidIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
except ImportError:
warnings.warn(
"Sentry is not configured because the Sentry SDK "
"(sentry_sdk package) is not installed",
UserWarning,
)
return # bail out early
try:
dsn = os.environ['SENTRY_DSN']
except KeyError:
warnings.warn(
"Sentry is not configured because SENTRY_DSN "
"was not supplied.",
UserWarning,
)
else:
sentry_sdk.init(
dsn=dsn,
integrations=[PyramidIntegration(), CeleryIntegration()],
)

def task(**kwargs):
"""A function task decorator used in place of ``@celery_app.task``."""
def wrapper(wrapped):
def callback(scanner, name, obj):
celery_app = scanner.config.registry.celery_app
celery_app.task(**kwargs)(obj)
venusian.attach(wrapped, callback)
return wrapped
return wrapper

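# Usage sketch (the task name is hypothetical): registration is deferred
# until a venusian scan runs with the pyramid config, at which point the
# callback registers the function on ``config.registry.celery_app``.
#   @task(name='cnxpublishing.example.noop')
#   def noop(*args, **kwargs):
#       pass
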
def _make_celery_app(config):
"""This exposes the celery app. The app is actually created as part
of the configuration. However, this does make the celery app functional
as a stand-alone celery application.
This puts the pyramid configuration object on the celery app to be
used for making the registry available to tasks running inside the
celery worker process pool. See ``CustomTask.__call__``.
"""
# Tack the pyramid config on the celery app for later use.
config.registry.celery_app.conf['pyramid_config'] = config
return config.registry.celery_app

def post_publication_processing(event, cursor):
"""Process post-publication events coming out of the database."""
module_ident, ident_hash = event.module_ident, event.ident_hash
celery_app = get_current_registry().celery_app
# Check baking is not already queued.
cursor.execute('SELECT result_id::text '
'FROM document_baking_result_associations '
'WHERE module_ident = %s', (module_ident,))
for result in cursor.fetchall():
state = celery_app.AsyncResult(result[0]).state
if state in ('QUEUED', 'STARTED', 'RETRY'):
logger.debug('Already queued module_ident={} ident_hash={}'.format(
module_ident, ident_hash))
return
logger.debug('Queued for processing module_ident={} ident_hash={}'.format(
module_ident, ident_hash))
recipe_ids = _get_recipe_ids(module_ident, cursor)
update_module_state(cursor, module_ident, 'processing', recipe_ids[0])
# Commit the state change before proceeding.
cursor.connection.commit()
# Start of task
# FIXME Looking up the task isn't the most clear usage here.
task_name = 'cnxpublishing.subscribers.baking_processor'
baking_processor = celery_app.tasks[task_name]
result = baking_processor.delay(module_ident, ident_hash)
baking_processor.backend.store_result(result.id, None, 'QUEUED')
# Save the mapping between a celery task and this event.
track_baking_proc_state(result, module_ident, cursor)

def parse_archive_uri(uri):
"""Given an archive URI, parse to a split ident-hash."""
parsed = urlparse(uri)
path = parsed.path.rstrip('/').split('/')
ident_hash = path[-1]
ident_hash = unquote(ident_hash)
return ident_hash

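# Usage sketch (the ident-hash is a made-up example):
#   >>> parse_archive_uri('/contents/07509e07-3732-45d9-a102-dd9a4dad5456@4')
#   '07509e07-3732-45d9-a102-dd9a4dad5456@4'
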
def declare_api_routes(config):
"""Declaration of routing"""
add_route = config.add_route
add_route('get-content', '/contents/{ident_hash}')
add_route('get-resource', '/resources/{hash}')
# User actions API
add_route('license-request', '/contents/{uuid}/licensors')
add_route('roles-request', '/contents/{uuid}/roles')
add_route('acl-request', '/contents/{uuid}/permissions')
# Publishing API
add_route('publications', '/publications')
add_route('get-publication', '/publications/{id}')
add_route('publication-license-acceptance',
'/publications/{id}/license-acceptances/{uid}')
add_route('publication-role-acceptance',
'/publications/{id}/role-acceptances/{uid}')
# TODO (8-May-12017) Remove because the term collate is being phased out.
add_route('collate-content', '/contents/{ident_hash}/collate-content')
add_route('bake-content', '/contents/{ident_hash}/baked')
# Moderation routes
add_route('moderation', '/moderations')
add_route('moderate', '/moderations/{id}')
add_route('moderation-rss', '/feeds/moderations.rss')
# API Key routes
add_route('api-keys', '/api-keys')
add_route('api-key', '/api-keys/{id}')

def declare_browsable_routes(config):
"""Declaration of routes that can be browsed by users."""
# This makes our routes slashed, which is good browser behavior.
config.add_notfound_view(default_exceptionresponse_view,
append_slash=True)
add_route = config.add_route
add_route('admin-index', '/a/')
add_route('admin-moderation', '/a/moderation/')
add_route('admin-api-keys', '/a/api-keys/')
add_route('admin-add-site-messages', '/a/site-messages/',
request_method='GET')
add_route('admin-add-site-messages-POST', '/a/site-messages/',
request_method='POST')
add_route('admin-delete-site-messages', '/a/site-messages/',
request_method='DELETE')
add_route('admin-edit-site-message', '/a/site-messages/{id}/',
request_method='GET')
add_route('admin-edit-site-message-POST', '/a/site-messages/{id}/',
request_method='POST')
add_route('admin-content-status', '/a/content-status/')
add_route('admin-content-status-single', '/a/content-status/{uuid}')
add_route('admin-print-style', '/a/print-style/')
add_route('admin-print-style-single', '/a/print-style/{style}')

def includeme(config):
"""Declare all routes."""
config.include('pyramid_jinja2')
config.add_jinja2_renderer('.html')
config.add_jinja2_renderer('.rss')
config.add_static_view(name='/a/static', path="cnxpublishing:static/")
# Commit the configuration; otherwise the jinja2_env won't have
# a `globals` assignment.
config.commit()
# Place a few globals in the template environment.
from cnxdb.ident_hash import join_ident_hash
for ext in ('.html', '.rss',):
jinja2_env = config.get_jinja2_environment(ext)
jinja2_env.globals.update(
join_ident_hash=join_ident_hash,
)
declare_api_routes(config)
declare_browsable_routes(config)

def _formatter_callback_factory(): # pragma: no cover
"""Returns a list of includes to be given to `cnxepub.collation.collate`.
"""
includes = []
exercise_url_template = '{baseUrl}/api/exercises?q={field}:"{{itemCode}}"'
settings = get_current_registry().settings
exercise_base_url = settings.get('embeddables.exercise.base_url', None)
exercise_matches = [match.split(',', 1) for match in aslist(
settings.get('embeddables.exercise.match', ''), flatten=False)]
exercise_token = settings.get('embeddables.exercise.token', None)
mathml_url = settings.get('mathmlcloud.url', None)
memcache_servers = settings.get('memcache_servers')
if memcache_servers:
memcache_servers = memcache_servers.split()
else:
memcache_servers = None
if exercise_base_url and exercise_matches:
mc_client = None
if memcache_servers:
mc_client = memcache.Client(memcache_servers, debug=0)
for (exercise_match, exercise_field) in exercise_matches:
template = exercise_url_template.format(
baseUrl=exercise_base_url, field=exercise_field)
includes.append(exercise_callback_factory(exercise_match,
template,
mc_client,
exercise_token,
mathml_url))
return includes

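# Settings sketch for the factory above (INI-style; the values are
# hypothetical examples of the keys read via ``settings.get``):
#   embeddables.exercise.base_url = https://exercises.example.org
#   embeddables.exercise.match = #ost/api/ex/,tag
#   embeddables.exercise.token = <token>
#   mathmlcloud.url = https://mathml.example.org
#   memcache_servers = 127.0.0.1:11211 127.0.0.2:11211
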
def bake(binder, recipe_id, publisher, message, cursor):
"""Given a `Binder` as `binder`, bake the contents and
persist those changes alongside the published content.
"""
recipe = _get_recipe(recipe_id, cursor)
includes = _formatter_callback_factory()
binder = collate_models(binder, ruleset=recipe, includes=includes)
def flatten_filter(model):
return (isinstance(model, cnxepub.CompositeDocument) or
(isinstance(model, cnxepub.Binder) and
model.metadata.get('type') == 'composite-chapter'))
def only_documents_filter(model):
return isinstance(model, cnxepub.Document) \
and not isinstance(model, cnxepub.CompositeDocument)
for doc in cnxepub.flatten_to(binder, flatten_filter):
publish_composite_model(cursor, doc, binder, publisher, message)
for doc in cnxepub.flatten_to(binder, only_documents_filter):
publish_collated_document(cursor, doc, binder)
tree = cnxepub.model_to_tree(binder)
publish_collated_tree(cursor, tree)
return []

def volcano(differential_dfs, title='Axial Volcano Plot', scripts_mode="CDN", data_mode="directory",
organism="human", q_value_column_name="q", log2FC_column_name="logFC",
output_dir=".", filename="volcano.html", version=this_version):
"""
Arguments:
differential_dfs (dict or pandas.DataFrame): python dict of names to pandas dataframes, or a single dataframe, indexed by gene symbols, with q-value and log2 fold-change columns named by ``q_value_column_name`` and ``log2FC_column_name``.
title (str): The title of the plot (to be embedded in the html).
scripts_mode (str): Choose from [`"CDN"`, `"directory"`, `"inline"`]:
- `"CDN"` compiles a single HTML page with links to scripts hosted on a CDN,
- `"directory"` compiles a directory with all scripts locally cached,
- `"inline"` compiles a single HTML file with all scripts/styles inlined.
data_mode (str): Choose from ["directory", "inline"]:
- "directory" compiles a directory with all data locally cached,
- "inline" compiles a single HTML file with all data inlined.
organism (str): `"human"` or `"mouse"`
q_value_column_name (str): name of the column containing q-values
log2FC_column_name (str): name of the column containing log2 fold-changes
output_dir (str): the directory in which to output the file
filename (str): the filename of the output file
version (str): the version of the javascripts to use.
Leave the default to pin the version, or choose "latest" to get updates,
or choose part of the version string to get minor updates.
Returns:
Path: The file path the HTML was written to.
"""
output_dir = Path(output_dir)
output_dir.mkdir(exist_ok=True, parents=True)
# Data =======================
if isinstance(differential_dfs, pd.DataFrame):
differential_dfs = {'differential': differential_dfs}
for name, df in list(differential_dfs.items()):  # list() copy: the dict is mutated in the loop
df = df[[q_value_column_name, log2FC_column_name]]
df.columns = ['q', 'logFC']
df = df.round(2)
# TODO drop all zero rows
_verify_differential_df(df)
del differential_dfs[name]
differential_dfs[_sanitize(name)] = df
names_and_differentials = f"var names_and_differentials = { '{'+ ','.join([_quote(name)+': '+df.to_json(orient='index') for name, df in differential_dfs.items()]) +'}' };"
data_block = _data_block(data_mode, [('names_and_differentials', names_and_differentials)], output_dir, include_gene_sets=False, organism=organism)
# Scripts =======================
scripts = third_party_scripts + [CDN_url(version)+"js/util.js", CDN_url(version)+"js/GOrilla.js", CDN_url(version)+"js/volcano.js"]
scripts_block = _scripts_block(scripts, scripts_mode, output_dir)
html = templateEnv.get_template('volcano.html.j2').render(title=title, scripts_block=scripts_block+'\n'+data_block, organism="HOMO_SAPIENS")
(output_dir / filename).write_text(html)
return (output_dir / filename).resolve()

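# Usage sketch (assumes pandas is imported as pd; gene symbols and values
# are fabricated):
#   df = pd.DataFrame({'q': [0.01, 0.20], 'logFC': [2.5, -1.1]},
#                     index=['BRCA1', 'TP53'])
#   volcano({'treatment_vs_control': df}, title='Example Volcano')
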
def heatmap(genes_by_samples_matrix, sample_attributes, title='Axial Heatmap', scripts_mode="CDN", data_mode="directory",
organism="human", separate_zscore_by=["system"],
output_dir=".", filename="heatmap.html", version=this_version):
"""
Arguments:
genes_by_samples_matrix (pandas.DataFrame): dataframe indexed by genes, columns are samples
sample_attributes (pandas.DataFrame): dataframe indexed by samples, columns are sample attributes (e.g. classes)
title (str): The title of the plot (to be embedded in the html).
scripts_mode (str): Choose from [`"CDN"`, `"directory"`, `"inline"`]:
- `"CDN"` compiles a single HTML page with links to scripts hosted on a CDN,
- `"directory"` compiles a directory with all scripts locally cached,
- `"inline"` compiles a single HTML file with all scripts/styles inlined.
data_mode (str): Choose from ["directory", "inline"]:
- "directory" compiles a directory with all data locally cached,
- "inline" compiles a single HTML file with all data inlined.
organism (str): `"human"` or `"mouse"`
separate_zscore_by (list): sample attributes within which z-scores are computed separately
output_dir (str): the directory in which to output the file
filename (str): the filename of the output file
version (str): the version of the javascripts to use.
Leave the default to pin the version, or choose "latest" to get updates,
or choose part of the version string to get minor updates.
Returns:
Path: The file path the HTML was written to.
"""
output_dir = Path(output_dir)
output_dir.mkdir(exist_ok=True, parents=True)
# Data =======================
_verify_sample_by_genes_matrix(genes_by_samples_matrix)
_verify_sample_attributes(genes_by_samples_matrix, sample_attributes)
genes_by_samples_matrix = genes_by_samples_matrix.round(2)
# TODO drop all zero rows
matrix = f"var matrix = {genes_by_samples_matrix.to_json(orient='columns')};"
classes = f"var classes = {sample_attributes.to_json(orient='index')};"
data_block = _data_block(data_mode, [('matrix', matrix), ('classes', classes)], output_dir, organism=organism)
# Scripts =======================
scripts = third_party_scripts + [CDN_url(version)+"js/util.js", CDN_url(version)+"js/reorder.js", CDN_url(version)+"js/heatmap.js"]
scripts_block = _scripts_block(scripts, scripts_mode, output_dir)
html = templateEnv.get_template('heatmap.html.j2').render(title=title, scripts_block=scripts_block+'\n'+data_block, separate_zscore_by=separate_zscore_by)
(output_dir / filename).write_text(html)
return (output_dir / filename).resolve()

def graph(networkx_graph, title='Axial Graph Visualization', scripts_mode="CDN", data_mode="directory",
output_dir=".", filename="graph.html", version=this_version):
"""
Arguments:
networkx_graph (networkx.Graph): any instance of networkx.Graph
title (str): The title of the plot (to be embedded in the html).
scripts_mode (str): Choose from [`"CDN"`, `"directory"`, `"inline"`]:
- `"CDN"` compiles a single HTML page with links to scripts hosted on a CDN,
- `"directory"` compiles a directory with all scripts locally cached,
- `"inline"` compiles a single HTML file with all scripts/styles inlined.
data_mode (str): Choose from ["directory", "inline"]:
- "directory" compiles a directory with all data locally cached,
- "inline" compiles a single HTML file with all data inlined.
output_dir (str): the directory in which to output the file
filename (str): the filename of the output file
version (str): the version of the javascripts to use.
Leave the default to pin the version, or choose "latest" to get updates,
or choose part of the version string to get minor updates.
Returns:
Path: The file path the HTML was written to.
"""
output_dir = Path(output_dir)
output_dir.mkdir(exist_ok=True, parents=True)
# Scripts =======================
scripts = third_party_scripts + [CDN_url(version)+"js/cola.min.js", CDN_url(version)+"js/graph.js"]
scripts_block = _scripts_block(scripts, scripts_mode, output_dir)
# Data =======================
graph_json = nx_json.node_link_data(networkx_graph)
for node in graph_json['nodes']:
for attr, val in node.items():
if isinstance(val, numbers.Number):
node[attr] = round(val, 2)
for link in graph_json['links']:
for attr, val in link.items():
if isinstance(val, numbers.Number):
link[attr] = round(val, 2)
graph_json = f"var graph = {json.dumps(graph_json)};"
data_block = _data_block(data_mode, [('graph', graph_json)], output_dir)
html = templateEnv.get_template('graph.html.j2').render(title=title, scripts_block=scripts_block+'\n'+data_block, nodes=networkx_graph.nodes())
(output_dir / filename).write_text(html)
return (output_dir / filename).resolve()

@contextlib.contextmanager  # requires ``import contextlib``
def db_connect(connection_string=None, **kwargs):
"""Supply a database connection object as a context manager."""
if connection_string is None:
connection_string = get_current_registry().settings[CONNECTION_STRING]
db_conn = psycopg2.connect(connection_string, **kwargs)
try:
with db_conn:
yield db_conn
finally:
db_conn.close()

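# Usage sketch (connection string hypothetical): the ``with db_conn``
# inside the generator commits on clean exit, and the connection is
# always closed afterwards.
#   with db_connect('postgresql://localhost/repo') as db_conn:
#       with db_conn.cursor() as cursor:
#           cursor.execute('SELECT 1')
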
def with_db_cursor(func):
"""Decorator that supplies a cursor to the function.
This passes in a psycopg2 Cursor as the argument 'cursor'.
It also accepts a cursor if one is given.
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
if 'cursor' in kwargs or func.__code__.co_argcount == len(args):
return func(*args, **kwargs)
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
kwargs['cursor'] = cursor
return func(*args, **kwargs)
return wrapped

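# Usage sketch (the function is hypothetical): a cursor is injected when
# the caller does not supply one.
#   @with_db_cursor
#   def fetch_statenames(cursor):
#       cursor.execute('SELECT statename FROM modulestates')
#       return [row[0] for row in cursor.fetchall()]
#
#   fetch_statenames()              # opens a connection and injects a cursor
#   fetch_statenames(cursor=cur)    # reuses an existing cursor
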
def _role_type_to_db_type(type_):
"""Translates a role type (a value found in
``cnxepub.ATTRIBUTED_ROLE_KEYS``) to a database compatible
value for ``role_types``.
"""
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""\
WITH unnested_role_types AS (
SELECT unnest(enum_range(NULL::role_types)) as role_type
ORDER BY role_type ASC)
SELECT array_agg(role_type)::text[] FROM unnested_role_types""")
db_types = cursor.fetchone()[0]
return dict(zip(cnxepub.ATTRIBUTED_ROLE_KEYS, db_types))[type_]

def _dissect_roles(metadata):
"""Given a model's ``metadata``, iterate over the roles.
Yields the role identifier and role type as a tuple.
"""
for role_key in cnxepub.ATTRIBUTED_ROLE_KEYS:
for user in metadata.get(role_key, []):
if user['type'] != 'cnx-id':
raise ValueError("Archive only accepts Connexions users.")
uid = parse_user_uri(user['id'])
yield uid, role_key

def upsert_pending_licensors(cursor, document_id):
"""Update or insert records for pending license acceptors."""
cursor.execute("""\
SELECT "uuid", "metadata"
FROM pending_documents
WHERE id = %s""", (document_id,))
uuid_, metadata = cursor.fetchone()
acceptors = set([uid for uid, type_ in _dissect_roles(metadata)])
# Acquire a list of existing acceptors.
cursor.execute("""\
SELECT "user_id", "accepted"
FROM license_acceptances
WHERE uuid = %s""", (uuid_,))
existing_acceptors_mapping = dict(cursor.fetchall())
# Who's not in the existing list?
existing_acceptors = set(existing_acceptors_mapping.keys())
new_acceptors = acceptors.difference(existing_acceptors)
# Insert the new licensor acceptors.
for acceptor in new_acceptors:
cursor.execute("""\
INSERT INTO license_acceptances
("uuid", "user_id", "accepted")
VALUES (%s, %s, NULL)""", (uuid_, acceptor,))
# Has everyone already accepted?
cursor.execute("""\
SELECT user_id
FROM license_acceptances
WHERE
uuid = %s
AND
(accepted is UNKNOWN OR accepted is FALSE)""", (uuid_,))
defectors = set(cursor.fetchall())
if not defectors:
# Update the pending document license acceptance state.
cursor.execute("""\
update pending_documents set license_accepted = 't'
where id = %s""", (document_id,))

def upsert_pending_roles(cursor, document_id):
"""Update or insert records for pending document role acceptance."""
cursor.execute("""\
SELECT "uuid", "metadata"
FROM pending_documents
WHERE id = %s""", (document_id,))
uuid_, metadata = cursor.fetchone()
acceptors = set([(uid, _role_type_to_db_type(type_),)
for uid, type_ in _dissect_roles(metadata)])
# Upsert the user info.
upsert_users(cursor, [x[0] for x in acceptors])
# Acquire a list of existing acceptors.
cursor.execute("""\
SELECT user_id, role_type
FROM role_acceptances
WHERE uuid = %s""", (uuid_,))
existing_roles = set([(r, t,) for r, t in cursor.fetchall()])
# Who's not in the existing list?
existing_acceptors = existing_roles
new_acceptors = acceptors.difference(existing_acceptors)
# Insert the new role acceptors.
for acceptor, type_ in new_acceptors:
cursor.execute("""\
INSERT INTO role_acceptances
("uuid", "user_id", "role_type", "accepted")
VALUES (%s, %s, %s, DEFAULT)""", (uuid_, acceptor, type_))
# Has everyone already accepted?
cursor.execute("""\
SELECT user_id
FROM role_acceptances
WHERE
uuid = %s
AND
(accepted is UNKNOWN OR accepted is FALSE)""", (uuid_,))
defectors = set(cursor.fetchall())
if not defectors:
# Update the pending document role acceptance state.
cursor.execute("""\
update pending_documents set roles_accepted = 't'
where id = %s""", (document_id,))

def obtain_licenses():
"""Obtain the licenses in a dictionary form, keyed by url."""
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""\
SELECT combined_row.url, row_to_json(combined_row) FROM (
SELECT "code", "version", "name", "url", "is_valid_for_publication"
FROM licenses) AS combined_row""")
licenses = {r[0]: r[1] for r in cursor.fetchall()}
return licenses

def _validate_license(model):
"""Given the model, check the license is one valid for publication."""
license_mapping = obtain_licenses()
try:
license_url = model.metadata['license_url']
except KeyError:
raise exceptions.MissingRequiredMetadata('license_url')
try:
license = license_mapping[license_url]
except KeyError:
raise exceptions.InvalidLicense(license_url)
if not license['is_valid_for_publication']:
raise exceptions.InvalidLicense(license_url)

def _validate_roles(model):
"""Given the model, check that all the metadata role values
have valid information in them and any required metadata fields
contain values.
"""
required_roles = (ATTRIBUTED_ROLE_KEYS[0], ATTRIBUTED_ROLE_KEYS[4],)
for role_key in ATTRIBUTED_ROLE_KEYS:
try:
roles = model.metadata[role_key]
except KeyError:
if role_key in required_roles:
raise exceptions.MissingRequiredMetadata(role_key)
else:
if role_key in required_roles and len(roles) == 0:
raise exceptions.MissingRequiredMetadata(role_key)
for role in roles:
if role.get('type') != 'cnx-id':
raise exceptions.InvalidRole(role_key, role)

def _validate_derived_from(cursor, model):
"""Given a database cursor and model, check the derived-from
value accurately points to content in the archive.
The value can be nothing or must point to existing content.
"""
derived_from_uri = model.metadata.get('derived_from_uri')
if derived_from_uri is None:
return # bail out early
# Can we parse the value?
try:
ident_hash = parse_archive_uri(derived_from_uri)
uuid_, version = split_ident_hash(ident_hash, split_version=True)
except (ValueError, IdentHashSyntaxError, IdentHashShortId) as exc:
raise exceptions.InvalidMetadata('derived_from_uri', derived_from_uri,
original_exception=exc)
# Is the ident-hash a valid pointer?
args = [uuid_]
table = 'modules'
version_condition = ''
if version != (None, None,):
args.extend(version)
table = 'modules'
version_condition = " AND major_version = %s" \
" AND minor_version {} %s" \
.format(version[1] is None and 'is' or '=')
cursor.execute("""SELECT 't' FROM {} WHERE uuid::text = %s{}"""
.format(table, version_condition), args)
try:
_exists = cursor.fetchone()[0] # noqa
except TypeError: # None type
raise exceptions.InvalidMetadata('derived_from_uri', derived_from_uri)
# Assign the derived_from value so that we don't have to split it again.
model.metadata['derived_from'] = ident_hash

def _validate_subjects(cursor, model):
"""Give a database cursor and model, check the subjects against
the subject vocabulary.
"""
subject_vocab = [term[0] for term in acquire_subject_vocabulary(cursor)]
subjects = model.metadata.get('subjects', [])
invalid_subjects = [s for s in subjects if s not in subject_vocab]
if invalid_subjects:
raise exceptions.InvalidMetadata('subjects', invalid_subjects)

def validate_model(cursor, model):
"""Validates the model using a series of checks on bits of the data."""
# Check the license is one valid for publication.
_validate_license(model)
_validate_roles(model)
# Other required metadata includes: title, summary
required_metadata = ('title', 'summary',)
for metadata_key in required_metadata:
if model.metadata.get(metadata_key) in [None, '', []]:
raise exceptions.MissingRequiredMetadata(metadata_key)
# Ensure that derived-from values are either None
# or point at a live record in the archive.
_validate_derived_from(cursor, model)
# FIXME Valid language code?
# Are the given 'subjects' part of the vocabulary?
_validate_subjects(cursor, model)

def is_publication_permissible(cursor, publication_id, uuid_):
"""Check the given publisher of this publication given
by ``publication_id`` is allowed to publish the content given
by ``uuid``.
"""
# Check the publishing user has permission to publish
cursor.execute("""\
SELECT 't'::boolean
FROM
pending_documents AS pd
NATURAL JOIN document_acl AS acl
JOIN publications AS p ON (pd.publication_id = p.id)
WHERE
p.id = %s
AND
pd.uuid = %s
AND
p.publisher = acl.user_id
AND
acl.permission = 'publish'""", (publication_id, uuid_,))
try:
is_allowed = cursor.fetchone()[0]
except TypeError:
is_allowed = False
return is_allowed

def add_pending_model(cursor, publication_id, model):
"""Adds a model (binder or document) that is awaiting publication
to the database.
"""
# FIXME Too much happening here...
assert isinstance(model, (cnxepub.Document, cnxepub.Binder,)), type(model)
uri = model.get_uri('cnx-archive')
if uri is not None:
ident_hash = parse_archive_uri(uri)
id, version = split_ident_hash(ident_hash, split_version=True)
cursor.execute("""\
SELECT major_version + 1 as next_version
FROM latest_modules
WHERE uuid = %s
UNION ALL
SELECT 1 as next_version
ORDER BY next_version DESC
LIMIT 1
""", (id,))
next_major_version = cursor.fetchone()[0]
if isinstance(model, cnxepub.Document):
version = (next_major_version, None,)
else: # ...assume it's a binder.
version = (next_major_version, 1,)
else:
cursor.execute("""\
WITH
control_insert AS (
INSERT INTO document_controls (uuid) VALUES (DEFAULT) RETURNING uuid),
acl_insert AS (
INSERT INTO document_acl (uuid, user_id, permission)
VALUES ((SELECT uuid FROM control_insert),
(SELECT publisher FROM publications WHERE id = %s),
'publish'::permission_type))
SELECT uuid FROM control_insert""", (publication_id,))
id = cursor.fetchone()[0]
if isinstance(model, cnxepub.Document):
version = (1, None,)
else: # ...assume it's a binder.
version = (1, 1,)
type_ = _get_type_name(model)
model.id = str(id)
model.metadata['version'] = '.'.join([str(v) for v in version if v])
args = [publication_id, id, version[0], version[1], type_,
json.dumps(model.metadata)]
cursor.execute("""\
INSERT INTO "pending_documents"
("publication_id", "uuid", "major_version", "minor_version", "type",
"license_accepted", "roles_accepted", "metadata")
VALUES (%s, %s, %s, %s, %s, 'f', 'f', %s)
RETURNING "id", "uuid", module_version("major_version", "minor_version")
""", args)
pending_id, uuid_, version = cursor.fetchone()
pending_ident_hash = join_ident_hash(uuid_, version)
# Assign the new ident-hash to the document for later use.
request = get_current_request()
path = request.route_path('get-content', ident_hash=pending_ident_hash)
model.set_uri('cnx-archive', path)
# Check if the publication is allowed for the publishing user.
if not is_publication_permissible(cursor, publication_id, id):
# Set the failure but continue the operation of inserting
# the pending document.
exc = exceptions.NotAllowed(id)
exc.publication_id = publication_id
exc.pending_document_id = pending_id
exc.pending_ident_hash = pending_ident_hash
set_publication_failure(cursor, exc)
try:
validate_model(cursor, model)
except exceptions.PublicationException as exc:
exc_info = sys.exc_info()
exc.publication_id = publication_id
exc.pending_document_id = pending_id
exc.pending_ident_hash = pending_ident_hash
try:
set_publication_failure(cursor, exc)
except BaseException:
import traceback
print("Critical data error. Immediate attention is "
"required. On publication at '{}'."
.format(publication_id),
file=sys.stderr)
# Print the critical exception.
traceback.print_exc()
# Raise the previous exception, so we know the original cause.
raise exc_info[1].with_traceback(exc_info[2])
else:
upsert_pending_licensors(cursor, pending_id)
upsert_pending_roles(cursor, pending_id)
notify_users(cursor, pending_id)
return pending_ident_hash

def lookup_document_pointer(ident_hash, cursor):
"""Lookup a document by id and version."""
id, version = split_ident_hash(ident_hash, split_version=True)
stmt = "SELECT name FROM modules WHERE uuid = %s"
args = [id]
if version and version[0] is not None:
operator = 'is' if version[1] is None else '='
stmt += " AND (major_version = %s AND minor_version {} %s)" \
.format(operator)
args.extend(version)
cursor.execute(stmt, args)
try:
title = cursor.fetchone()[0]
except TypeError:
raise DocumentLookupError()
else:
metadata = {'title': title}
return cnxepub.DocumentPointer(ident_hash, metadata)

def add_pending_model_content(cursor, publication_id, model):
"""Updates the pending model's content.
This is a secondary step, not done in ``add_pending_model``, because
content reference resolution requires the identifiers as they
will appear in the end publication.
"""
cursor.execute("""\
SELECT id, ident_hash(uuid, major_version, minor_version)
FROM pending_documents
WHERE publication_id = %s AND uuid = %s""",
(publication_id, model.id,))
document_info = cursor.fetchone()
def attach_info_to_exception(exc):
"""Small cached function to grab the pending document id
and hash to attach to the exception, which is useful when
reading the json data on a response.
"""
exc.publication_id = publication_id
exc.pending_document_id, exc.pending_ident_hash = document_info
def mark_invalid_reference(reference):
"""Set the publication to failure and attach invalid reference
to the publication.
"""
exc = exceptions.InvalidReference(reference)
attach_info_to_exception(exc)
set_publication_failure(cursor, exc)
for resource in getattr(model, 'resources', []):
add_pending_resource(cursor, resource, document=model)
if isinstance(model, cnxepub.Document):
for reference in model.references:
if reference.is_bound:
reference.bind(reference.bound_model, '/resources/{}')
elif reference.remote_type == cnxepub.INTERNAL_REFERENCE_TYPE:
if reference.uri.startswith('#'):
pass
elif reference.uri.startswith('/contents'):
ident_hash = parse_archive_uri(reference.uri)
try:
doc_pointer = lookup_document_pointer(
ident_hash, cursor)
except DocumentLookupError:
mark_invalid_reference(reference)
else:
reference.bind(doc_pointer, "/contents/{}")
else:
mark_invalid_reference(reference)
# else, it's a remote or cnx.org reference ...Do nothing.
args = (psycopg2.Binary(model.content.encode('utf-8')),
publication_id, model.id,)
stmt = """\
UPDATE "pending_documents"
SET ("content") = (%s)
WHERE "publication_id" = %s AND "uuid" = %s"""
else:
metadata = model.metadata.copy()
# All document pointers in the tree are valid?
document_pointers = [m for m in cnxepub.flatten_model(model)
if isinstance(m, cnxepub.DocumentPointer)]
document_pointer_ident_hashes = [
(split_ident_hash(dp.ident_hash)[0],
split_ident_hash(dp.ident_hash, split_version=True)[1][0],
split_ident_hash(dp.ident_hash, split_version=True)[1][1],)
for dp in document_pointers]
document_pointer_ident_hashes = zip(*document_pointer_ident_hashes)
if document_pointers:
uuids, major_vers, minor_vers = document_pointer_ident_hashes
cursor.execute("""\
SELECT dp.uuid, module_version(dp.maj_ver, dp.min_ver) AS version,
dp.uuid = m.uuid AS exists,
m.portal_type = 'Module' AS is_document
FROM (SELECT unnest(%s::uuid[]), unnest(%s::integer[]), unnest(%s::integer[]))\
AS dp(uuid, maj_ver, min_ver)
LEFT JOIN modules AS m ON dp.uuid = m.uuid AND \
(dp.maj_ver = m.major_version OR dp.maj_ver is null)""",
(list(uuids), list(major_vers), list(minor_vers),))
valid_pointer_results = cursor.fetchall()
for result_row in valid_pointer_results:
uuid, version, exists, is_document = result_row
if not (exists and is_document):
dp = [dp for dp in document_pointers
if dp.ident_hash == join_ident_hash(uuid, version)
][0]
exc = exceptions.InvalidDocumentPointer(
dp, exists=exists, is_document=is_document)
attach_info_to_exception(exc)
set_publication_failure(cursor, exc)
# Insert the tree into the metadata.
metadata['_tree'] = cnxepub.model_to_tree(model)
args = (json.dumps(metadata),
None, # TODO Render the HTML tree at ``model.content``.
publication_id, model.id,)
# Must pave over metadata because postgresql lacks built-in
# json update functions.
stmt = """\
UPDATE "pending_documents"
SET ("metadata", "content") = (%s, %s)
WHERE "publication_id" = %s AND "uuid" = %s"""
cursor.execute(stmt, args)

def set_publication_failure(cursor, exc):
"""Given a publication exception, set the publication as failed and
append the failure message to the publication record.
"""
publication_id = exc.publication_id
if publication_id is None:
raise ValueError("Exception must have a ``publication_id`` value.")
cursor.execute("""\
SELECT "state_messages"
FROM publications
WHERE id = %s""", (publication_id,))
state_messages = cursor.fetchone()[0]
if state_messages is None:
state_messages = []
entry = exc.__dict__
entry['message'] = exc.message
state_messages.append(entry)
state_messages = json.dumps(state_messages)
cursor.execute("""\
UPDATE publications SET ("state", "state_messages") = (%s, %s)
WHERE id = %s""", ('Failed/Error', state_messages, publication_id,))

def add_publication(cursor, epub, epub_file, is_pre_publication=False):
"""Adds a publication entry and makes each item
a pending document.
"""
publisher = epub[0].metadata['publisher']
publish_message = epub[0].metadata['publication_message']
epub_binary = psycopg2.Binary(epub_file.read())
args = (publisher, publish_message, epub_binary, is_pre_publication,)
cursor.execute("""\
INSERT INTO publications
("publisher", "publication_message", "epub", "is_pre_publication")
VALUES (%s, %s, %s, %s)
RETURNING id
""", args)
publication_id = cursor.fetchone()[0]
insert_mapping = {}
models = set([])
for package in epub:
binder = cnxepub.adapt_package(package)
if binder in models:
continue
for document in cnxepub.flatten_to_documents(binder):
if document not in models:
ident_hash = add_pending_model(
cursor, publication_id, document)
insert_mapping[document.id] = ident_hash
models.add(document)
# The binding object could be translucent/see-through,
# (case for a binder that only contains loose-documents).
# Otherwise we should also publish the binder.
if not binder.is_translucent:
ident_hash = add_pending_model(cursor, publication_id, binder)
insert_mapping[binder.id] = ident_hash
models.add(binder)
for model in models:
# Now that all models have been given an identifier
# we can write the content to the database.
try:
add_pending_model_content(cursor, publication_id, model)
except ResourceFileExceededLimitError as e:
e.publication_id = publication_id
set_publication_failure(cursor, e)
return publication_id, insert_mapping

def _check_pending_document_license_state(cursor, document_id):
"""Check the aggregate state on the pending document."""
cursor.execute("""\
SELECT BOOL_AND(accepted IS TRUE)
FROM
pending_documents AS pd,
license_acceptances AS la
WHERE
pd.id = %s
AND
pd.uuid = la.uuid""",
(document_id,))
try:
is_accepted = cursor.fetchone()[0]
except TypeError:
# There are no licenses associated with this document.
is_accepted = True
return is_accepted

def _check_pending_document_role_state(cursor, document_id):
"""Check the aggregate state on the pending document."""
cursor.execute("""\
SELECT BOOL_AND(accepted IS TRUE)
FROM
role_acceptances AS ra,
pending_documents as pd
WHERE
pd.id = %s
AND
pd.uuid = ra.uuid""",
(document_id,))
try:
is_accepted = cursor.fetchone()[0]
except TypeError:
# There are no roles to accept
is_accepted = True
return is_accepted

def _update_pending_document_state(cursor, document_id, is_license_accepted,
are_roles_accepted):
"""Update the state of the document's state values."""
args = (bool(is_license_accepted), bool(are_roles_accepted),
document_id,)
cursor.execute("""\
UPDATE pending_documents
SET (license_accepted, roles_accepted) = (%s, %s)
WHERE id = %s""",
args)

def is_revision_publication(publication_id, cursor):
"""Checks to see if the publication contains any revised models.
Revised in this context means that it is a new version of an
existing piece of content.
"""
cursor.execute("""\
SELECT 't'::boolean FROM modules
WHERE uuid IN (SELECT uuid
FROM pending_documents
WHERE publication_id = %s)
LIMIT 1""", (publication_id,))
try:
cursor.fetchone()[0]
except TypeError: # NoneType
has_revision_models = False
else:
has_revision_models = True
return has_revision_models

def poke_publication_state(publication_id, cursor):
"""Invoked to poke at the publication to update and acquire its current
state. This is used to persist the publication to archive.
"""
cursor.execute("""\
SELECT "state", "state_messages", "is_pre_publication", "publisher"
FROM publications
WHERE id = %s""", (publication_id,))
row = cursor.fetchone()
current_state, messages, is_pre_publication, publisher = row
if current_state in END_N_INTERIM_STATES:
# Bailout early, because the publication is either in progress
# or has been completed.
return current_state, messages
# Check for acceptance...
cursor.execute("""\
SELECT
pd.id, license_accepted, roles_accepted
FROM publications AS p JOIN pending_documents AS pd ON p.id = pd.publication_id
WHERE p.id = %s
""", (publication_id,))
pending_document_states = cursor.fetchall()
publication_state_mapping = {}
for document_state in pending_document_states:
id, is_license_accepted, are_roles_accepted = document_state
publication_state_mapping[id] = [is_license_accepted,
are_roles_accepted]
has_changed_state = False
if is_license_accepted and are_roles_accepted:
continue
if not is_license_accepted:
accepted = _check_pending_document_license_state(
cursor, id)
if accepted != is_license_accepted:
has_changed_state = True
is_license_accepted = accepted
publication_state_mapping[id][0] = accepted
if not are_roles_accepted:
accepted = _check_pending_document_role_state(
cursor, id)
if accepted != are_roles_accepted:
has_changed_state = True
are_roles_accepted = accepted
publication_state_mapping[id][1] = accepted
if has_changed_state:
_update_pending_document_state(cursor, id,
is_license_accepted,
are_roles_accepted)
# Are all the documents ready for publication?
state_lump = set([l and r for l, r in publication_state_mapping.values()])
is_publish_ready = not (False in state_lump) and not (None in state_lump)
change_state = "Done/Success"
if not is_publish_ready:
change_state = "Waiting for acceptance"
# Does this publication need moderation? (ignore on pre-publication)
# TODO Is this a revision publication? If so, it doesn't matter who the
# user is, because they have been vetted by the previous publisher.
# This has loopholes...
if not is_pre_publication and is_publish_ready:
# Has this publisher been moderated before?
cursor.execute("""\
SELECT is_moderated
FROM users AS u LEFT JOIN publications AS p ON (u.username = p.publisher)
WHERE p.id = %s""",
(publication_id,))
try:
is_publisher_moderated = cursor.fetchone()[0]
except TypeError:
is_publisher_moderated = False
# Are any of these documents a revision? Thus vetting of
# the publisher was done by a vetted peer.
if not is_publisher_moderated \
and not is_revision_publication(publication_id, cursor):
# Hold up! This publish needs moderation.
change_state = "Waiting for moderation"
is_publish_ready = False
# Publish the pending documents.
if is_publish_ready:
change_state = "Done/Success"
if not is_pre_publication:
publication_state = publish_pending(cursor, publication_id)
else:
cursor.execute("""\
UPDATE publications
SET state = %s
WHERE id = %s
RETURNING state, state_messages""", (change_state, publication_id,))
publication_state, messages = cursor.fetchone()
else:
# `change_state` set prior to this...
cursor.execute("""\
UPDATE publications
SET state = %s
WHERE id = %s
RETURNING state, state_messages""", (change_state, publication_id,))
publication_state, messages = cursor.fetchone()
return publication_state, messages

def check_publication_state(publication_id):
"""Check the publication's current state."""
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""\
SELECT "state", "state_messages"
FROM publications
WHERE id = %s""", (publication_id,))
publication_state, publication_messages = cursor.fetchone()
return publication_state, publication_messages

def _node_to_model(tree_or_item, metadata=None, parent=None,
lucent_id=cnxepub.TRANSLUCENT_BINDER_ID):
"""Given a tree, parse to a set of models"""
if 'contents' in tree_or_item:
# It is a binder.
tree = tree_or_item
binder = cnxepub.TranslucentBinder(metadata=tree)
for item in tree['contents']:
node = _node_to_model(item, parent=binder,
lucent_id=lucent_id)
if node.metadata['title'] != item['title']:
binder.set_title_for_node(node, item['title'])
result = binder
else:
# It is an item pointing at a document.
item = tree_or_item
result = cnxepub.DocumentPointer(item['id'], metadata=item)
if parent is not None:
parent.append(result)
return result

def _reassemble_binder(id, tree, metadata):
"""Reassemble a Binder object coming out of the database."""
binder = cnxepub.Binder(id, metadata=metadata)
for item in tree['contents']:
node = _node_to_model(item, parent=binder)
if node.metadata['title'] != item['title']:
binder.set_title_for_node(node, item['title'])
return binder

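# Tree-shape sketch consumed by the two helpers above (ids and titles are
# fabricated): interior nodes carry ``contents``; leaves carry an ``id``
# pointing at a document.
#   {'id': '<uuid>@1.1',
#    'title': 'Example Book',
#    'contents': [
#        {'id': 'subcol', 'title': 'Chapter 1', 'contents': [
#            {'id': '<uuid>@3', 'title': 'Introduction'}]}]}
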
def publish_pending(cursor, publication_id):
"""Given a publication id as ``publication_id``,
write the documents to the *Connexions Archive*.
"""
cursor.execute("""\
WITH state_update AS (
UPDATE publications SET state = 'Publishing' WHERE id = %s
)
SELECT publisher, publication_message
FROM publications
WHERE id = %s""",
(publication_id, publication_id,))
publisher, message = cursor.fetchone()
cursor.connection.commit()
all_models = []
from .publish import publish_model
# Commit documents one at a time...
type_ = cnxepub.Document.__name__
cursor.execute("""\
SELECT id, uuid, major_version, minor_version, metadata, content
FROM pending_documents
WHERE type = %s AND publication_id = %s""", (type_, publication_id,))
for row in cursor.fetchall():
# FIXME Oof, this is hideous!
id, major_version, minor_version = row[1:4]
id = str(id)
version = '.'.join([str(x)
for x in (major_version, minor_version,)
if x is not None])
metadata, content = row[-2:]
content = content[:]  # copy the content out of the DB-provided buffer
metadata['version'] = version
document = cnxepub.Document(id, content, metadata)
for ref in document.references:
if ref.uri.startswith('/resources/'):
hash = ref.uri[len('/resources/'):]
cursor.execute("""\
SELECT data, media_type
FROM pending_resources
WHERE hash = %s""", (hash,))
data, media_type = cursor.fetchone()
document.resources.append(cnxepub.Resource(
hash, io.BytesIO(data[:]), media_type, filename=hash))
_ident_hash = publish_model(cursor, document, publisher, message) # noqa
all_models.append(document)
# And now the binders, one at a time...
type_ = cnxepub.Binder.__name__
cursor.execute("""\
SELECT id, uuid, major_version, minor_version, metadata, content
FROM pending_documents
WHERE type = %s AND publication_id = %s""", (type_, publication_id,))
for row in cursor.fetchall():
id, major_version, minor_version, metadata = row[1:5]
tree = metadata['_tree']
binder = _reassemble_binder(str(id), tree, metadata)
# Add the resources
cursor.execute("""\
SELECT hash, data, media_type, filename
FROM pending_resources r
JOIN pending_resource_associations a ON a.resource_id = r.id
JOIN pending_documents d ON a.document_id = d.id
WHERE ident_hash(uuid, major_version, minor_version) = %s""",
(binder.ident_hash,))
binder.resources = [
cnxepub.Resource(r_hash,
io.BytesIO(r_data[:]),
r_media_type,
filename=r_filename)
for (r_hash, r_data, r_media_type, r_filename)
in cursor.fetchall()]
_ident_hash = publish_model(cursor, binder, publisher, message) # noqa
all_models.append(binder)
# Republish binders containing shared documents.
from .publish import republish_binders
_republished_ident_hashes = republish_binders(cursor, all_models) # noqa
# Lastly, update the publication status.
cursor.execute("""\
UPDATE publications
SET state = 'Done/Success'
WHERE id = %s
RETURNING state""", (publication_id,))
state = cursor.fetchone()[0]
return state

def accept_publication_license(cursor, publication_id, user_id,
document_ids, is_accepted=False):
"""Accept or deny the document license for the publication
(``publication_id``) and user (at ``user_id``)
for the documents (listed by id as ``document_ids``).
"""
cursor.execute("""\
UPDATE license_acceptances AS la
SET accepted = %s
FROM pending_documents AS pd
WHERE
pd.publication_id = %s
AND
la.user_id = %s
AND
pd.uuid = ANY(%s::UUID[])
AND
pd.uuid = la.uuid""",
(is_accepted, publication_id, user_id, document_ids,))

def accept_publication_role(cursor, publication_id, user_id,
document_ids, is_accepted=False):
"""Accept or deny the document role attribution for the publication
(``publication_id``) and user (at ``user_id``)
for the documents (listed by id as ``document_ids``).
"""
cursor.execute("""\
UPDATE role_acceptances AS ra
SET accepted = %s
FROM pending_documents AS pd
WHERE
pd.publication_id = %s
AND
ra.user_id = %s
AND
pd.uuid = ANY(%s::UUID[])
AND
pd.uuid = ra.uuid""",
(is_accepted, publication_id, user_id, document_ids,))

def upsert_license_requests(cursor, uuid_, roles):
"""Given a ``uuid`` and list of ``roles`` (user identifiers)
create a license acceptance entry. If ``has_accepted`` is supplied,
it will be used to assign an acceptance value to all listed ``uids``.
"""
if not isinstance(roles, (list, set, tuple,)):
raise TypeError("``roles`` is an invalid type: {}".format(type(roles)))
acceptors = set([x['uid'] for x in roles])
# Acquire a list of existing acceptors.
cursor.execute("""\
SELECT user_id, accepted FROM license_acceptances WHERE uuid = %s""",
(uuid_,))
existing_acceptors = cursor.fetchall()
# Who's not in the existing list?
new_acceptors = acceptors.difference([x[0] for x in existing_acceptors])
# Insert the new licensor acceptors.
if new_acceptors:
args = []
values_fmt = []
for uid in new_acceptors:
has_accepted = [x.get('has_accepted', None)
for x in roles
if uid == x['uid']][0]
args.extend([uuid_, uid, has_accepted])
values_fmt.append("(%s, %s, %s)")
values_fmt = ', '.join(values_fmt)
cursor.execute("""\
INSERT INTO license_acceptances (uuid, user_id, accepted)
VALUES {}""".format(values_fmt), args)
# Update any existing license acceptors
acceptors = set([
(x['uid'], x.get('has_accepted', None),)
for x in roles
# Prevent updating newly inserted records.
if x['uid'] not in new_acceptors
])
existing_acceptors = set([
x for x in existing_acceptors
# Prevent updating newly inserted records.
if x[0] not in new_acceptors
])
tobe_updated_acceptors = acceptors.difference(existing_acceptors)
for uid, has_accepted in tobe_updated_acceptors:
cursor.execute("""\
UPDATE license_acceptances SET accepted = %s
WHERE uuid = %s AND user_id = %s""", (has_accepted, uuid_, uid,))

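# Payload sketch for the license-request helpers here (uids hypothetical):
#   upsert_license_requests(cursor, uuid_, [
#       {'uid': 'user1', 'has_accepted': True},
#       {'uid': 'user2'},  # acceptance left unset (NULL)
#   ])
#   remove_license_requests(cursor, uuid_, ['user2'])
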
def remove_license_requests(cursor, uuid_, uids):
"""Given a ``uuid`` and list of ``uids`` (user identifiers)
remove the identified users' license acceptance entries.
"""
if not isinstance(uids, (list, set, tuple,)):
raise TypeError("``uids`` is an invalid type: {}".format(type(uids)))
acceptors = list(set(uids))
# Remove the entries.
cursor.execute("""\
DELETE FROM license_acceptances
WHERE uuid = %s AND user_id = ANY(%s::text[])""", (uuid_, acceptors,))

def upsert_role_requests(cursor, uuid_, roles):
"""Given a ``uuid`` and list of dicts containing the ``uid`` and
``role`` for creating a role acceptance entry. The ``roles`` dict
can optionally contain a ``has_accepted`` value, which will default
to true.
"""
if not isinstance(roles, (list, set, tuple,)):
raise TypeError("``roles`` is an invalid type: {}"
.format(type(roles)))
acceptors = set([(x['uid'], x['role'],) for x in roles])
# Acquire a list of existing acceptors.
cursor.execute("""\
SELECT user_id, role_type, accepted
FROM role_acceptances
WHERE uuid = %s""", (uuid_,))
existing_roles = cursor.fetchall()
# Who's not in the existing list?
existing_acceptors = set([(r, t,) for r, t, _ in existing_roles])
new_acceptors = acceptors.difference(existing_acceptors)
# Insert the new role acceptors.
for acceptor, type_ in new_acceptors:
has_accepted = [x.get('has_accepted', None)
for x in roles
if acceptor == x['uid'] and type_ == x['role']][0]
cursor.execute("""\
INSERT INTO role_acceptances ("uuid", "user_id", "role_type", "accepted")
VALUES (%s, %s, %s, %s)""", (uuid_, acceptor, type_, has_accepted,))
# Update any existing role acceptors
acceptors = set([
(x['uid'], x['role'], x.get('has_accepted', None),)
for x in roles
# Prevent updating newly inserted records.
if (x['uid'], x['role'],) not in new_acceptors
])
existing_acceptors = set([
x for x in existing_roles
# Prevent updating newly inserted records.
if (x[0], x[1],) not in new_acceptors
])
tobe_updated_acceptors = acceptors.difference(existing_acceptors)
for uid, type_, has_accepted in tobe_updated_acceptors:
cursor.execute("""\
UPDATE role_acceptances SET accepted = %s
WHERE uuid = %s AND user_id = %s AND role_type = %s""",
(has_accepted, uuid_, uid, type_,))

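# Payload sketch (uid and role values hypothetical; ``role`` must be a
# valid ``role_types`` enum value):
#   upsert_role_requests(cursor, uuid_, [
#       {'uid': 'user1', 'role': 'Author', 'has_accepted': True},
#   ])
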
def remove_role_requests(cursor, uuid_, roles):
"""Given a ``uuid`` and list of dicts containing the ``uid``
(user identifiers) and ``role`` for removal of the identified
users' role acceptance entries.
"""
if not isinstance(roles, (list, set, tuple,)):
raise TypeError("``roles`` is an invalid type: {}".format(type(roles)))
acceptors = set([(x['uid'], x['role'],) for x in roles])
# Remove the entries.
for uid, role_type in acceptors:
cursor.execute("""\
DELETE FROM role_acceptances
WHERE uuid = %s AND user_id = %s AND role_type = %s""",
(uuid_, uid, role_type,))

def upsert_acl(cursor, uuid_, permissions):
"""Given a ``uuid`` and a set of permissions given as a
tuple of ``uid`` and ``permission``, upsert them into the database.
"""
if not isinstance(permissions, (list, set, tuple,)):
raise TypeError("``permissions`` is an invalid type: {}"
.format(type(permissions)))
permissions = set(permissions)
# Acquire the existing ACL.
cursor.execute("""\
SELECT user_id, permission
FROM document_acl
WHERE uuid = %s""", (uuid_,))
existing = set([(r, t,) for r, t in cursor.fetchall()])
# Who's not in the existing list?
new_entries = permissions.difference(existing)
# Insert the new permissions.
for uid, permission in new_entries:
cursor.execute("""\
INSERT INTO document_acl
("uuid", "user_id", "permission")
VALUES (%s, %s, %s)""", (uuid_, uid, permission))

def remove_acl(cursor, uuid_, permissions):
"""Given a ``uuid`` and a set of permissions given as a tuple
of ``uid`` and ``permission``, remove these entries from the database.
"""
if not isinstance(permissions, (list, set, tuple,)):
raise TypeError("``permissions`` is an invalid type: {}"
.format(type(permissions)))
permissions = set(permissions)
# Remove the entries.
for uid, permission in permissions:
cursor.execute("""\
DELETE FROM document_acl
WHERE uuid = %s AND user_id = %s AND permission = %s""",
(uuid_, uid, permission,))

def _upsert_persons(cursor, person_ids, lookup_func):
"""Upsert's user info into the database.
The model contains the user info as part of the role values.
"""
person_ids = list(set(person_ids)) # cleanse data
# Check for existing records to update.
cursor.execute("SELECT personid from persons where personid = ANY (%s)",
(person_ids,))
existing_person_ids = [x[0] for x in cursor.fetchall()]
new_person_ids = [p for p in person_ids if p not in existing_person_ids]
# Update existing records.
for person_id in existing_person_ids:
# TODO only update based on a delta against the 'updated' column.
person_info = lookup_func(person_id)
cursor.execute("""\
UPDATE persons
SET (personid, firstname, surname, fullname) =
( %(username)s, %(first_name)s, %(last_name)s,
%(full_name)s)
WHERE personid = %(username)s""", person_info)
# Insert new records.
# Email is inserted as an empty string because accounts no longer
# gives out user email info, but a string value is still needed for
# legacy systems to properly process the persons table.
for person_id in new_person_ids:
person_info = lookup_func(person_id)
cursor.execute("""\
INSERT INTO persons
(personid, firstname, surname, fullname, email)
VALUES
(%(username)s, %(first_name)s,
%(last_name)s, %(full_name)s, '')""", person_info)

def _upsert_users(cursor, user_ids, lookup_func):
"""Upsert's user info into the database.
The model contains the user info as part of the role values.
"""
user_ids = list(set(user_ids)) # cleanse data
# Check for existing records to update.
cursor.execute("SELECT username from users where username = ANY (%s)",
(user_ids,))
existing_user_ids = [x[0] for x in cursor.fetchall()]
new_user_ids = [u for u in user_ids if u not in existing_user_ids]
# Update existing records.
for user_id in existing_user_ids:
# TODO only update based on a delta against the 'updated' column.
user_info = lookup_func(user_id)
cursor.execute("""\
UPDATE users
SET (updated, username, first_name, last_name, full_name, title) =
(CURRENT_TIMESTAMP, %(username)s, %(first_name)s, %(last_name)s,
%(full_name)s, %(title)s)
WHERE username = %(username)s""", user_info)
# Insert new records.
for user_id in new_user_ids:
user_info = lookup_func(user_id)
cursor.execute("""\
INSERT INTO users
(username, first_name, last_name, full_name, suffix, title)
VALUES
(%(username)s, %(first_name)s, %(last_name)s, %(full_name)s,
%(suffix)s, %(title)s)""", user_info) |
def upsert_users(cursor, user_ids):
"""Given a set of user identifiers (``user_ids``),
upsert them into the database after checking accounts for
the latest information.
"""
accounts = get_current_registry().getUtility(IOpenstaxAccounts)
def lookup_profile(username):
profile = accounts.get_profile_by_username(username)
# See structure documentation at:
# https://<accounts-instance>/api/docs/v1/users/index
if profile is None:
raise UserFetchError(username)
opt_attrs = ('first_name', 'last_name', 'full_name',
'title', 'suffix',)
for attr in opt_attrs:
profile.setdefault(attr, None)
return profile
_upsert_users(cursor, user_ids, lookup_profile)
_upsert_persons(cursor, user_ids, lookup_profile) |
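# Standalone illustration of the normalization in lookup_profile: optional
# attributes missing from the accounts payload default to None before the
# profile dict is handed to the SQL statements. The payload is made up.
profile = {'username': 'jdoe', 'first_name': 'Jane'}
for attr in ('first_name', 'last_name', 'full_name', 'title', 'suffix'):
    profile.setdefault(attr, None)
assert profile['last_name'] is None
assert profile['first_name'] == 'Jane' |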
def notify_users(cursor, document_id):
"""Notify all users about their role and/or license acceptance
for a piece of content associated with the given ``document_id``.
"""
# FIXME Notification sending is currently disabled; bail out here.
# The code below is retained for when messaging is re-enabled.
return
registry = get_current_registry()
accounts = registry.getUtility(IOpenstaxAccounts)
cursor.execute("""\
SELECT la.user_id
FROM license_acceptances AS la
WHERE
la.uuid = (SELECT uuid FROM pending_documents WHERE id = %s)
AND la.notified IS NULL AND (NOT la.accepted or la.accepted IS UNKNOWN)
""", (document_id,))
licensors = [x[0] for x in cursor.fetchall()]
cursor.execute("""\
SELECT user_id, array_agg(role_type)::text[]
FROM role_acceptances AS ra
WHERE
ra.uuid = (SELECT uuid FROM pending_documents WHERE id = %s)
AND ra.notified IS NULL AND (NOT ra.accepted or ra.accepted IS UNKNOWN)
GROUP BY user_id
""", (document_id,))
roles = {u: r for u, r in cursor.fetchall()}
needs_notified = set(licensors + list(roles.keys()))
for user_id in needs_notified:
data = {
'user_id': user_id,
'full_name': None, # TODO
'licensor': user_id in licensors,
'roles': roles.get(user_id, []),
}
message = NOTIFICATION_TEMPLATE.render(**data)
accounts.send_message(user_id, NOTIFICATION_SUBJECT, message)
cursor.execute("""\
UPDATE license_acceptances SET notified = CURRENT_TIMESTAMP
WHERE
uuid = (SELECT uuid FROM pending_documents WHERE id = %s)
AND user_id = ANY (%s)""", (document_id, licensors,))
# FIXME overwrites notified for all roles types a user might have.
cursor.execute("""\
UPDATE role_acceptances SET notified = CURRENT_TIMESTAMP
WHERE
uuid = (SELECT uuid FROM pending_documents WHERE id = %s)
AND user_id = ANY (%s)""", (document_id, list(roles.keys()),)) |
def set_post_publications_state(cursor, module_ident, state_name,
state_message=''): # pragma: no cover
"""This sets the post-publication state in the database."""
cursor.execute("""\
INSERT INTO post_publications
(module_ident, state, state_message)
VALUES (%s, %s, %s)""", (module_ident, state_name, state_message)) |
def update_module_state(cursor, module_ident,
state_name, recipe): # pragma: no cover
"""This updates the module's state in the database."""
cursor.execute("""\
UPDATE modules
SET stateid = (
SELECT stateid FROM modulestates WHERE statename = %s
), recipe = %s, baked = now() WHERE module_ident = %s""",
(state_name, recipe, module_ident)) |
def get_moderation(request):
"""Return the list of publications that need moderation."""
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""\
SELECT row_to_json(combined_rows) FROM (
SELECT id, created, publisher, publication_message,
(select array_agg(row_to_json(pd))
from pending_documents as pd
where pd.publication_id = p.id) AS models
FROM publications AS p
WHERE state = 'Waiting for moderation') AS combined_rows""")
moderations = [x[0] for x in cursor.fetchall()]
return moderations |
def includeme(config):
"""Configures the session manager"""
settings = config.registry.settings
session_factory = SignedCookieSessionFactory(settings['session_key'])
config.set_session_factory(session_factory) |
def admin_print_styles(request):
"""
Returns a dictionary of all unique print_styles, and their latest tag,
revision, and recipe_type.
"""
styles = []
# This fetches all recipes that have been used to successfully bake a
# current book, plus all default recipes that have not yet been used,
# as well as "bad" books that are not in the "current" state but would
# otherwise be the latest/current for that book.
with db_connect(cursor_factory=DictCursor) as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""\
WITH latest AS (SELECT print_style, recipe,
count(*), count(nullif(stateid, 1)) as bad
FROM modules m
WHERE portal_type = 'Collection'
AND recipe IS NOT NULL
AND (
baked IS NOT NULL OR (
baked IS NULL AND stateid not in (1,8)
)
)
AND ARRAY [major_version, minor_version] = (
SELECT max(ARRAY[major_version,minor_version]) FROM
modules where m.uuid= uuid)
GROUP BY print_style, recipe
),
defaults AS (SELECT print_style, fileid AS recipe
FROM default_print_style_recipes d
WHERE not exists (SELECT 1
FROM latest WHERE latest.recipe = d.fileid)
)
SELECT coalesce(ps.print_style, '(custom)') as print_style,
ps.title, coalesce(ps.recipe_type, 'web') as type,
ps.revised, ps.tag, ps.commit_id, la.count, la.bad
FROM latest la LEFT JOIN print_style_recipes ps ON
la.print_style = ps.print_style AND
la.recipe = ps.fileid
UNION ALL
SELECT ps.print_style, ps.title, ps.recipe_type,
ps.revised, ps.tag, ps.commit_id, 0 AS count, 0 AS bad
FROM defaults de JOIN print_style_recipes ps ON
de.print_style = ps.print_style AND
de.recipe = ps.fileid
ORDER BY revised desc NULLS LAST, print_style
""")
for row in cursor.fetchall():
styles.append({
'print_style': row['print_style'],
'title': row['title'],
'type': row['type'],
'revised': row['revised'],
'tag': row['tag'],
'commit_id': row['commit_id'],
'number': row['count'],
'bad': row['bad'],
'link': request.route_path('admin-print-style-single',
style=row['print_style'])
})
return {'styles': styles} |
def admin_print_styles_single(request):
""" Returns all books with any version of the given print style.
Returns the print_style, recipe type, the number of books using the print_style,
along with a dictionary of the book, author, revision date, recipe,
tag of the print_style, and a link to the content.
"""
style = request.matchdict['style']
# do db search to get file id and other info on the print_style
with db_connect(cursor_factory=DictCursor) as db_conn:
with db_conn.cursor() as cursor:
if style != '(custom)':
cursor.execute("""
SELECT fileid, recipe_type, title
FROM default_print_style_recipes
WHERE print_style=%s
""", vars=(style,))
info = cursor.fetchall()
if len(info) < 1:
current_recipe = None
recipe_type = None
status = None
else:
current_recipe = info[0]['fileid']
recipe_type = info[0]['recipe_type']
status = 'current'
cursor.execute("""\
SELECT name, authors, lm.revised, lm.recipe, psr.tag,
f.sha1 as hash, psr.commit_id, uuid,
ident_hash(uuid, major_version, minor_version)
FROM modules as lm
LEFT JOIN print_style_recipes as psr
ON (psr.print_style = lm.print_style and
psr.fileid = lm.recipe)
LEFT JOIN files f ON psr.fileid = f.fileid
WHERE lm.print_style=%s
AND portal_type='Collection'
AND ARRAY [major_version, minor_version] = (
SELECT max(ARRAY[major_version,minor_version])
FROM modules WHERE lm.uuid = uuid)
ORDER BY psr.tag DESC;
""", vars=(style,))
else:
current_recipe = '(custom)'
recipe_type = '(custom)'
cursor.execute("""\
SELECT name, authors, lm.revised, lm.recipe, NULL as tag,
f.sha1 as hash, NULL as commit_id, uuid,
ident_hash(uuid, major_version, minor_version)
FROM modules as lm
JOIN files f ON lm.recipe = f.fileid
WHERE portal_type='Collection'
AND NOT EXISTS (
SELECT 1 from print_style_recipes psr
WHERE psr.fileid = lm.recipe)
AND ARRAY [major_version, minor_version] = (
SELECT max(ARRAY[major_version,minor_version])
FROM modules WHERE lm.uuid = uuid)
ORDER BY uuid, recipe, revised DESC;
""", vars=(style,))
status = '(custom)'
collections = []
for row in cursor.fetchall():
recipe = row['recipe']
if (status != '(custom)' and
current_recipe is not None and
recipe != current_recipe):
status = 'stale'
collections.append({
'title': row['name'].decode('utf-8'),
'authors': row['authors'],
'revised': row['revised'],
'recipe': row['hash'],
'recipe_link': request.route_path('get-resource',
hash=row['hash']),
'tag': row['tag'],
'ident_hash': row['ident_hash'],
'link': request.route_path('get-content',
ident_hash=row['ident_hash']),
'status': status,
'status_link': request.route_path(
'admin-content-status-single', uuid=row['uuid']),
})
return {'number': len(collections),
'collections': collections,
'print_style': style,
'recipe_type': recipe_type} |
def get_api_keys(request):
"""Return the list of API keys."""
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""\
SELECT row_to_json(combined_rows) FROM (
SELECT id, key, name, groups FROM api_keys
) AS combined_rows""")
api_keys = [x[0] for x in cursor.fetchall()]
return api_keys |
def get_baking_statuses_sql(get_request):
""" Creates SQL to get info on baking books filtered from GET request.
All books that have ever attempted to bake will be returned if they
pass the filters in the GET request.
If a single book has been requested to bake multiple times there will
be a row for each of the baking attempts.
By default the results are sorted in descending order of when they were
requested to bake.
N.B. The version reported for a print-style linked recipe will be the
lowest cnx-recipes release installed that contains the exact recipe
used to bake that book, regardless of when the book was baked relative
to recipe releases. E.g. if a book uses the 'physics' recipe, and it is
identical for versions 1.1, 1.2, 1.3, and 1.4, then it will be reported
as version 1.1, even if the most recent release is tagged 1.4.
"""
args = {}
sort = get_request.get('sort', 'bpsa.created DESC')
if (len(sort.split(" ")) != 2 or
sort.split(" ")[0] not in SORTS_DICT.keys() or
sort.split(" ")[1] not in ARROW_MATCH.keys()):
raise httpexceptions.HTTPBadRequest(
'invalid sort: {}'.format(sort))
if sort == "STATE ASC" or sort == "STATE DESC":
sort = 'bpsa.created DESC'
uuid_filter = get_request.get('uuid', '').strip()
author_filter = get_request.get('author', '').strip()
latest_filter = get_request.get('latest', False)
sql_filters = "WHERE"
if latest_filter:
sql_filters += """ ARRAY [m.major_version, m.minor_version] = (
SELECT max(ARRAY[major_version,minor_version]) FROM
modules where m.uuid= uuid) AND """
if uuid_filter != '':
args['uuid'] = uuid_filter
sql_filters += " m.uuid=%(uuid)s AND "
if author_filter != '':
author_filter = author_filter.decode('utf-8')
sql_filters += " %(author)s=ANY(m.authors) "
args["author"] = author_filter
if sql_filters.endswith("AND "):
sql_filters = sql_filters[:-4]
if sql_filters == "WHERE":
sql_filters = ""
# FIXME celery AsyncResult API is so slow that this page takes
# 2 min. or more to load on production. As a workaround, this code
# accesses the celery_taskmeta table directly. Need to remove that access
# once we track enough state info ourselves. Want to track when queued,
# started, ended, etc. for future monitoring of baking system performance
# as well.
# The 'limit 1' subselect is to ensure the "oldest identical version"
# for recipes released as part of cnx-recipes (avoids one line per
# identical recipe file in different releases, for a single baking job)
statement = """
SELECT m.name, m.authors, m.uuid,
module_version(m.major_version,m.minor_version)
as current_version,
m.print_style,
CASE WHEN f.sha1 IS NOT NULL
THEN coalesce(dps.print_style,'(custom)')
ELSE dps.print_style
END AS recipe_name,
(select tag from print_style_recipes
where print_style = m.print_style
and fileid = m.recipe
order by revised asc limit 1) as recipe_tag,
coalesce(dps.fileid, m.recipe) as latest_recipe_id,
m.recipe as recipe_id,
f.sha1 as recipe,
m.module_ident,
ident_hash(m.uuid, m.major_version, m.minor_version),
bpsa.created, ctm.traceback,
CASE WHEN ctm.status = 'SUCCESS'
AND ms.statename = 'fallback'
THEN 'FALLBACK'
ELSE ctm.status
END as state
FROM document_baking_result_associations AS bpsa
INNER JOIN modules AS m USING (module_ident)
INNER JOIN modulestates as ms USING (stateid)
LEFT JOIN celery_taskmeta AS ctm
ON bpsa.result_id = ctm.task_id::uuid
LEFT JOIN default_print_style_recipes as dps
ON dps.print_style = m.print_style
LEFT JOIN latest_modules as lm
ON lm.uuid=m.uuid
LEFT JOIN files f on m.recipe = f.fileid
{}
ORDER BY {};
""".format(sql_filters, sort)
args.update({'sort': sort})
return statement, args |
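# Standalone mirror of the sort validation above, showing why it doubles
# as an SQL-injection guard: the sort expression is interpolated into the
# statement, so it must match a whitelisted column and direction. The
# SORTS_DICT/ARROW_MATCH contents here are assumptions for illustration.
SORTS_DICT = {'bpsa.created': 'created', 'm.name': 'name', 'STATE': 'state'}
ARROW_MATCH = {'ASC': 'arrow-up', 'DESC': 'arrow-down'}

def is_valid_sort(sort):
    parts = sort.split(' ')
    return (len(parts) == 2 and
            parts[0] in SORTS_DICT and
            parts[1] in ARROW_MATCH)

assert is_valid_sort('bpsa.created DESC')
assert not is_valid_sort('1; DROP TABLE modules; --') |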
def admin_content_status(request):
"""
Returns a dictionary with the states and info of baking books,
and the filters from the GET request to pre-populate the form.
"""
statement, sql_args = get_baking_statuses_sql(request.GET)
states = []
status_filters = request.params.getall('status_filter') or []
state_icons = dict(STATE_ICONS)
with db_connect(cursor_factory=DictCursor) as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(statement, vars=sql_args)
for row in cursor.fetchall():
message = ''
state = row['state'] or 'PENDING'
if status_filters and state not in status_filters:
continue
if state == 'FAILURE': # pragma: no cover
if row['traceback'] is not None:
message = row['traceback'].split("\n")[-2]
latest_recipe = row['latest_recipe_id']
current_recipe = row['recipe_id']
if (current_recipe is not None and
current_recipe != latest_recipe):
state += ' stale_recipe'
state_icon = state
if state[:7] == "SUCCESS" and len(state) > 7:
state_icon = 'unknown'
states.append({
'title': row['name'].decode('utf-8'),
'authors': format_authors(row['authors']),
'uuid': row['uuid'],
'print_style': row['print_style'],
'print_style_link': request.route_path(
'admin-print-style-single', style=row['print_style']),
'recipe': row['recipe'],
'recipe_name': row['recipe_name'],
'recipe_tag': row['recipe_tag'],
'recipe_link': request.route_path(
'get-resource', hash=row['recipe']),
'created': row['created'],
'state': state,
'state_message': message,
'state_icon': state_icons.get(
state_icon, DEFAULT_ICON),
'status_link': request.route_path(
'admin-content-status-single', uuid=row['uuid']),
'content_link': request.route_path(
'get-content', ident_hash=row['ident_hash'])
})
sort = request.params.get('sort', 'bpsa.created DESC')
sort_match = SORTS_DICT[sort.split(' ')[0]]
sort_arrow = ARROW_MATCH[sort.split(' ')[1]]
if sort == "STATE ASC":
states.sort(key=lambda x: x['state'])
if sort == "STATE DESC":
states.sort(key=lambda x: x['state'], reverse=True)
num_entries = request.params.get('number', 100) or 100
page = request.params.get('page', 1) or 1
try:
page = int(page)
num_entries = int(num_entries)
start_entry = (page - 1) * num_entries
except ValueError:
raise httpexceptions.HTTPBadRequest(
'invalid page({}) or entries per page({})'.
format(page, num_entries))
total_entries = len(states)
states = states[start_entry: start_entry + num_entries]
returns = sql_args
returns.update({'start_entry': start_entry,
'num_entries': num_entries,
'page': page,
'total_entries': total_entries,
'states': states,
'sort_' + sort_match: sort_arrow,
'sort': sort,
'domain': request.host,
'latest_only': request.GET.get('latest', False),
'STATE_ICONS': STATE_ICONS,
'status_filters': status_filters or [
i[0] for i in STATE_ICONS]})
return returns |
def admin_content_status_single(request):
"""
Returns a dictionary with all the past baking statuses of a single book.
"""
uuid = request.matchdict['uuid']
try:
UUID(uuid)
except ValueError:
raise httpexceptions.HTTPBadRequest(
'{} is not a valid uuid'.format(uuid))
statement, sql_args = get_baking_statuses_sql({'uuid': uuid})
with db_connect(cursor_factory=DictCursor) as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(statement, sql_args)
modules = cursor.fetchall()
if len(modules) == 0:
raise httpexceptions.HTTPBadRequest(
'{} is not a book'.format(uuid))
states = []
collection_info = modules[0]
for row in modules:
message = ''
state = row['state'] or 'PENDING'
if state == 'FAILURE': # pragma: no cover
if row['traceback'] is not None:
message = row['traceback']
latest_recipe = row['latest_recipe_id']
current_recipe = row['recipe_id']
if (latest_recipe is not None and
current_recipe != latest_recipe):
state += ' stale_recipe'
states.append({
'version': row['current_version'],
'recipe': row['recipe'],
'created': str(row['created']),
'state': state,
'state_message': message,
})
return {'uuid': str(collection_info['uuid']),
'title': collection_info['name'].decode('utf-8'),
'authors': format_authors(collection_info['authors']),
'print_style': collection_info['print_style'],
'current_recipe': collection_info['recipe_id'],
'current_ident': collection_info['module_ident'],
'current_state': states[0]['state'],
'states': states} |
def admin_content_status_single_POST(request):
""" Retriggers baking for a given book. """
args = admin_content_status_single(request)
title = args['title']
if args['current_state'] == 'SUCCESS':
args['response'] = title + ' is not stale, no need to bake'
return args
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("SELECT stateid FROM modules WHERE module_ident=%s",
vars=(args['current_ident'],))
data = cursor.fetchall()
if len(data) == 0:
raise httpexceptions.HTTPBadRequest(
'invalid module_ident: {}'.format(args['current_ident']))
# stateids 5 and 6 mark books already queued to bake or baking.
if data[0][0] == 5 or data[0][0] == 6:
args['response'] = title + ' is already baking/set to bake'
return args
cursor.execute("""UPDATE modules SET stateid=5
WHERE module_ident=%s""",
vars=(args['current_ident'],))
args['response'] = title + " set to bake!"
return args |
def _insert_optional_roles(cursor, model, ident):
"""Inserts the optional roles if values for the optional roles
exist.
"""
optional_roles = [
# (<metadata-attr>, <db-role-id>,),
('translators', 4,),
('editors', 5,),
]
for attr, role_id in optional_roles:
roles = model.metadata.get(attr)
if not roles:
# Bail out, no roles for this type.
continue
usernames = [parse_user_uri(x['id']) for x in roles]
cursor.execute("""\
INSERT INTO moduleoptionalroles (module_ident, roleid, personids)
VALUES (%s, %s, %s)""", (ident, role_id, usernames,)) |
def _insert_metadata(cursor, model, publisher, message):
"""Insert a module with the given ``metadata``."""
params = model.metadata.copy()
params['publisher'] = publisher
params['publication_message'] = message
params['_portal_type'] = _model_to_portaltype(model)
params['summary'] = str(cnxepub.DocumentSummaryFormatter(model))
# Transform person structs to id lists for database array entry.
for person_field in ATTRIBUTED_ROLE_KEYS:
params[person_field] = [parse_user_uri(x['id'])
for x in params.get(person_field, [])]
params['parent_ident_hash'] = parse_parent_ident_hash(model)
# Assign the id and version if one is known.
if model.ident_hash is not None:
uuid, version = split_ident_hash(model.ident_hash,
split_version=True)
params['_uuid'] = uuid
params['_major_version'], params['_minor_version'] = version
# Lookup legacy ``moduleid``.
cursor.execute("SELECT moduleid FROM latest_modules WHERE uuid = %s",
(uuid,))
# There is the chance that a uuid and version have been set,
# but a previous publication does not exist. Therefore the
# moduleid will not be found. This happens on a pre-publication.
try:
moduleid = cursor.fetchone()[0]
except TypeError: # NoneType
moduleid = None
params['_moduleid'] = moduleid
# Verify that uuid is reserved in document_controls. If not, add it.
cursor.execute("SELECT * from document_controls where uuid = %s",
(uuid,))
try:
cursor.fetchone()[0]
except TypeError: # NoneType
cursor.execute("INSERT INTO document_controls (uuid) VALUES (%s)",
(uuid,))
created = model.metadata.get('created', None)
# Format the statement to accept the identifiers.
stmt = MODULE_INSERTION_TEMPLATE.format(**{
'__uuid__': "%(_uuid)s::uuid",
'__major_version__': "%(_major_version)s",
'__minor_version__': "%(_minor_version)s",
'__moduleid__': moduleid is None and "DEFAULT" or "%(_moduleid)s",
'__created__': created is None and "DEFAULT" or "%(created)s",
})
else:
created = model.metadata.get('created', None)
# Format the statement for defaults.
stmt = MODULE_INSERTION_TEMPLATE.format(**{
'__uuid__': "DEFAULT",
'__major_version__': "DEFAULT",
'__minor_version__': "DEFAULT",
'__moduleid__': "DEFAULT",
'__created__': created is None and "DEFAULT" or "%(created)s",
})
# Insert the metadata
cursor.execute(stmt, params)
module_ident, ident_hash = cursor.fetchone()
# Insert optional roles
_insert_optional_roles(cursor, model, module_ident)
return module_ident, ident_hash |
def _get_file_sha1(file):
"""Return the SHA1 hash of the given a file-like object as ``file``.
This will seek the file back to 0 when it's finished.
"""
bits = file.read()
file.seek(0)
h = hashlib.new('sha1', bits).hexdigest()
return h |
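# Runnable sketch of the hashing helper using an in-memory file; note the
# seek(0) so callers can read the same file object again afterwards.
import hashlib
import io

f = io.BytesIO(b'hello world')
bits = f.read()
f.seek(0)
assert hashlib.new('sha1', bits).hexdigest() == \
    '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed'
assert f.read() == b'hello world' |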
def _insert_file(cursor, file, media_type):
"""Upsert the ``file`` and ``media_type`` into the files table.
Returns the ``fileid`` and ``sha1`` of the upserted file.
"""
resource_hash = _get_file_sha1(file)
cursor.execute("SELECT fileid FROM files WHERE sha1 = %s",
(resource_hash,))
try:
fileid = cursor.fetchone()[0]
except (IndexError, TypeError):
cursor.execute("INSERT INTO files (file, media_type) "
"VALUES (%s, %s)"
"RETURNING fileid",
(psycopg2.Binary(file.read()), media_type,))
fileid = cursor.fetchone()[0]
return fileid, resource_hash |
def _insert_resource_file(cursor, module_ident, resource):
"""Insert a resource into the modules_files table. This will
create a new file entry or associates an existing one.
"""
with resource.open() as file:
fileid, _ = _insert_file(cursor, file, resource.media_type)
# Is this file legitimately used twice within the same content?
cursor.execute("""\
select
(fileid = %s) as is_same_file
from module_files
where module_ident = %s and filename = %s""",
(fileid, module_ident, resource.filename,))
try:
is_same_file = cursor.fetchone()[0]
except TypeError: # NoneType
is_same_file = None
if is_same_file:
# All is good, bail out.
return
elif is_same_file is not None: # pragma: no cover
# This means the file is not the same, but a filename
# conflict exists.
# FFF At this time, it is impossible to get to this logic.
raise Exception("filename conflict")
args = (module_ident, fileid, resource.filename,)
cursor.execute("""\
INSERT INTO module_files (module_ident, fileid, filename)
VALUES (%s, %s, %s)""", args) |
def _insert_tree(cursor, tree, parent_id=None, index=0, is_collated=False):
"""Inserts a binder tree into the archive."""
if isinstance(tree, dict):
if tree['id'] == 'subcol':
document_id = None
title = tree['title']
else:
cursor.execute("""\
SELECT module_ident, name
FROM modules
WHERE ident_hash(uuid,major_version,minor_version) = %s
""", (tree['id'],))
try:
document_id, document_title = cursor.fetchone()
except TypeError: # NoneType
raise ValueError("Missing published document for '{}'."
.format(tree['id']))
if tree.get('title', None):
title = tree['title']
else:
title = document_title
# TODO We haven't settled on a flag (name or value)
# to pin the node to a specific version.
is_latest = True
cursor.execute(TREE_NODE_INSERT,
dict(document_id=document_id, parent_id=parent_id,
title=title, child_order=index,
is_latest=is_latest, is_collated=is_collated))
node_id = cursor.fetchone()[0]
if 'contents' in tree:
_insert_tree(cursor, tree['contents'], parent_id=node_id,
is_collated=is_collated)
elif isinstance(tree, list):
for index, tree_node in enumerate(tree):
_insert_tree(cursor, tree_node, parent_id=parent_id,
index=index, is_collated=is_collated) |
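# The shape of a binder tree this function expects, as a hypothetical
# example: 'subcol' nodes are subcollections (no document row), while
# other ids are ident-hashes of already-published documents.
example_tree = {
    'id': 'subcol',
    'title': 'Example Book',
    'contents': [
        {'id': 'd2d0da56-3d58-4f4f-9033-ef0fb4e0fbdc@1.1',
         'title': 'Chapter One'},
        {'id': 'subcol',
         'title': 'Part Two',
         'contents': []},
    ],
} |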
def publish_model(cursor, model, publisher, message):
"""Publishes the ``model`` and return its ident_hash."""
publishers = publisher
if isinstance(publishers, list) and len(publishers) > 1:
raise ValueError("Only one publisher is allowed. '{}' "
"were given: {}"
.format(len(publishers), publishers))
module_ident, ident_hash = _insert_metadata(cursor, model,
publisher, message)
for resource in getattr(model, 'resources', []):
_insert_resource_file(cursor, module_ident, resource)
if isinstance(model, Document):
html = bytes(cnxepub.DocumentContentFormatter(model))
sha1 = hashlib.new('sha1', html).hexdigest()
cursor.execute("SELECT fileid FROM files WHERE sha1 = %s", (sha1,))
try:
fileid = cursor.fetchone()[0]
except TypeError:
file_args = {
'media_type': 'text/html',
'data': psycopg2.Binary(html),
}
cursor.execute("""\
insert into files (file, media_type)
VALUES (%(data)s, %(media_type)s)
returning fileid""", file_args)
fileid = cursor.fetchone()[0]
args = {
'module_ident': module_ident,
'filename': 'index.cnxml.html',
'fileid': fileid,
}
cursor.execute("""\
INSERT INTO module_files
(module_ident, fileid, filename)
VALUES
(%(module_ident)s, %(fileid)s, %(filename)s)""", args)
elif isinstance(model, Binder):
tree = cnxepub.model_to_tree(model)
tree = _insert_tree(cursor, tree)
return ident_hash |
def publish_composite_model(cursor, model, parent_model, publisher, message):
"""Publishes the ``model`` and return its ident_hash."""
if not (isinstance(model, CompositeDocument) or
(isinstance(model, Binder) and
model.metadata.get('type') == 'composite-chapter')):
raise ValueError("This function only publishes Composite"
"objects. '{}' was given.".format(type(model)))
if issequence(publisher) and len(publisher) > 1:
raise ValueError("Only one publisher is allowed. '{}' "
"were given: {}"
.format(len(publisher), publisher))
module_ident, ident_hash = _insert_metadata(cursor, model,
publisher, message)
model.id, model.metadata['version'] = split_ident_hash(ident_hash)
model.set_uri('cnx-archive', ident_hash)
for resource in model.resources:
_insert_resource_file(cursor, module_ident, resource)
if isinstance(model, CompositeDocument):
html = bytes(cnxepub.DocumentContentFormatter(model))
fileid, _ = _insert_file(cursor, io.BytesIO(html), 'text/html')
file_arg = {
'module_ident': module_ident,
'parent_ident_hash': parent_model.ident_hash,
'fileid': fileid,
}
cursor.execute("""\
INSERT INTO collated_file_associations
(context, item, fileid)
VALUES
((SELECT module_ident FROM modules
WHERE ident_hash(uuid, major_version, minor_version)
= %(parent_ident_hash)s),
%(module_ident)s, %(fileid)s)""", file_arg)
return ident_hash |
def publish_collated_document(cursor, model, parent_model):
"""Publish a given `module`'s collated content in the context of
the `parent_model`. Note, the model's content is expected to already
have the collated content. This will just persist that content to
the archive.
"""
html = bytes(cnxepub.DocumentContentFormatter(model))
sha1 = hashlib.new('sha1', html).hexdigest()
cursor.execute("SELECT fileid FROM files WHERE sha1 = %s", (sha1,))
try:
fileid = cursor.fetchone()[0]
except TypeError:
file_args = {
'media_type': 'text/html',
'data': psycopg2.Binary(html),
}
cursor.execute("""\
INSERT INTO files (file, media_type)
VALUES (%(data)s, %(media_type)s)
RETURNING fileid""", file_args)
fileid = cursor.fetchone()[0]
args = {
'module_ident_hash': model.ident_hash,
'parent_ident_hash': parent_model.ident_hash,
'fileid': fileid,
}
stmt = """\
INSERT INTO collated_file_associations (context, item, fileid)
VALUES
((SELECT module_ident FROM modules
WHERE ident_hash(uuid, major_version, minor_version)
= %(parent_ident_hash)s),
(SELECT module_ident FROM modules
WHERE ident_hash(uuid, major_version, minor_version)
= %(module_ident_hash)s),
%(fileid)s)"""
cursor.execute(stmt, args) |
def publish_collated_tree(cursor, tree):
"""Publish a given collated `tree` (containing newly added
`CompositeDocument` objects and numbering information)
alongside the original tree.
"""
tree = _insert_tree(cursor, tree, is_collated=True)
return tree |
def republish_binders(cursor, models):
"""Republish the Binders that share Documents in the publication context.
This needs to be given all the models in the publication context."""
documents = set([])
binders = set([])
history_mapping = {} # <previous-ident-hash>: <current-ident-hash>
if not isinstance(models, (list, tuple, set,)):
raise TypeError("``models`` Must be a sequence of model objects."
"We were given: {}".format(models))
for model in models:
if isinstance(model, (cnxepub.Binder,)):
binders.add(split_ident_hash(model.ident_hash)[0])
for doc in cnxepub.flatten_to_documents(model):
documents.add(split_ident_hash(doc.ident_hash))
else:
documents.add(split_ident_hash(model.ident_hash))
to_be_republished = []
# What binders are these documents a part of?
for (uuid, version) in documents:
ident_hash = join_ident_hash(uuid, version)
previous_ident_hash = get_previous_publication(cursor, ident_hash)
if previous_ident_hash is None:
# Has no prior existence.
continue
else:
history_mapping[previous_ident_hash] = ident_hash
cursor.execute("""\
WITH RECURSIVE t(nodeid, parent_id, documentid, path) AS (
SELECT tr.nodeid, tr.parent_id, tr.documentid, ARRAY[tr.nodeid]
FROM trees tr
WHERE tr.documentid = (
SELECT module_ident FROM modules
WHERE ident_hash(uuid, major_version, minor_version) = %s)
UNION ALL
SELECT c.nodeid, c.parent_id, c.documentid, path || ARRAY[c.nodeid]
FROM trees c JOIN t ON (c.nodeid = t.parent_id)
WHERE not c.nodeid = ANY(t.path)
)
SELECT ident_hash(uuid, major_version, minor_version)
FROM t JOIN latest_modules m ON (t.documentid = m.module_ident)
WHERE t.parent_id IS NULL
""",
(previous_ident_hash,))
to_be_republished.extend([split_ident_hash(x[0])
for x in cursor.fetchall()])
to_be_republished = set(to_be_republished)
republished_ident_hashes = []
# Republish the Collections set.
for (uuid, version) in to_be_republished:
if uuid in binders:
# This binder is already in the publication context,
# don't try to publish it again.
continue
ident_hash = join_ident_hash(uuid, version)
bumped_version = bump_version(cursor, uuid, is_minor_bump=True)
republished_ident_hash = republish_collection(cursor, ident_hash,
version=bumped_version)
# Set the identifier history.
history_mapping[ident_hash] = republished_ident_hash
rebuild_collection_tree(cursor, ident_hash, history_mapping)
republished_ident_hashes.append(republished_ident_hash)
return republished_ident_hashes |
def get_previous_publication(cursor, ident_hash):
"""Get the previous publication of the given
publication as an ident-hash.
"""
cursor.execute("""\
WITH contextual_module AS (
SELECT uuid, module_ident
FROM modules
WHERE ident_hash(uuid, major_version, minor_version) = %s)
SELECT ident_hash(m.uuid, m.major_version, m.minor_version)
FROM modules AS m JOIN contextual_module AS context ON (m.uuid = context.uuid)
WHERE
m.module_ident < context.module_ident
ORDER BY revised DESC
LIMIT 1""", (ident_hash,))
try:
previous_ident_hash = cursor.fetchone()[0]
except TypeError: # NoneType
previous_ident_hash = None
return previous_ident_hash |
def bump_version(cursor, uuid, is_minor_bump=False):
"""Bump to the next version of the given content identified
by ``uuid``. Returns the next available version as a version tuple,
containing major and minor version.
If ``is_minor_bump`` is ``True``, a Collection receives a minor version
bump, e.g. 1.2 becomes 1.3. A Module always receives a major version
bump, e.g. 2 becomes 3, regardless of this option.
"""
cursor.execute("""\
SELECT portal_type, major_version, minor_version
FROM latest_modules
WHERE uuid = %s::uuid""", (uuid,))
type_, major_version, minor_version = cursor.fetchone()
incr = 1
if type_ == 'Collection' and is_minor_bump:
minor_version = minor_version + incr
else:
major_version = major_version + incr
return (major_version, minor_version,) |
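# Standalone illustration of the bump rules above (the branch logic is
# copied; the data is hypothetical).
def _bump(portal_type, major, minor, is_minor_bump=False):
    if portal_type == 'Collection' and is_minor_bump:
        return (major, minor + 1)
    return (major + 1, minor)

assert _bump('Collection', 1, 2, is_minor_bump=True) == (1, 3)
assert _bump('Collection', 1, 2) == (2, 2)
assert _bump('Module', 2, None, is_minor_bump=True) == (3, None) |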
def republish_collection(cursor, ident_hash, version):
"""Republish the collection identified as ``ident_hash`` with
the given ``version``.
"""
if not isinstance(version, (list, tuple,)):
split_version = version.split('.')
if len(split_version) == 1:
split_version.append(None)
version = tuple(split_version)
major_version, minor_version = version
cursor.execute("""\
WITH previous AS (
SELECT module_ident
FROM modules
WHERE ident_hash(uuid, major_version, minor_version) = %s),
inserted AS (
INSERT INTO modules
(uuid, major_version, minor_version, revised,
portal_type, moduleid,
name, created, language,
submitter, submitlog,
abstractid, licenseid, parent, parentauthors,
authors, maintainers, licensors,
google_analytics, buylink,
stateid, doctype)
SELECT
uuid, %s, %s, CURRENT_TIMESTAMP,
portal_type, moduleid,
name, created, language,
submitter, submitlog,
abstractid, licenseid, parent, parentauthors,
authors, maintainers, licensors,
google_analytics, buylink,
stateid, doctype
FROM modules AS m JOIN previous AS p ON (m.module_ident = p.module_ident)
RETURNING
ident_hash(uuid, major_version, minor_version) AS ident_hash,
module_ident),
keywords AS (
INSERT INTO modulekeywords (module_ident, keywordid)
SELECT i.module_ident, keywordid
FROM modulekeywords AS mk, inserted AS i, previous AS p
WHERE mk.module_ident = p.module_ident),
tags AS (
INSERT INTO moduletags (module_ident, tagid)
SELECT i.module_ident, tagid
FROM moduletags AS mt, inserted AS i, previous AS p
WHERE mt.module_ident = p.module_ident)
SELECT ident_hash FROM inserted""",
(ident_hash, major_version, minor_version,))
repub_ident_hash = cursor.fetchone()[0]
return repub_ident_hash |
def rebuild_collection_tree(cursor, ident_hash, history_map):
"""Create a new tree for the collection based on the old tree but with
new document ids.
"""
collection_tree_sql = """\
WITH RECURSIVE t(nodeid, parent_id, documentid, title, childorder, latest,
ident_hash, path) AS (
SELECT
tr.nodeid, tr.parent_id, tr.documentid,
tr.title, tr.childorder, tr.latest,
(SELECT ident_hash(uuid, major_version, minor_version)
FROM modules
WHERE module_ident = tr.documentid) AS ident_hash,
ARRAY[tr.nodeid]
FROM trees AS tr
WHERE tr.documentid = (
SELECT module_ident
FROM modules
WHERE ident_hash(uuid, major_version, minor_version) = %s)
AND tr.is_collated = FALSE
UNION ALL
SELECT
c.nodeid, c.parent_id, c.documentid, c.title, c.childorder, c.latest,
(SELECT ident_hash(uuid, major_version, minor_version)
FROM modules
WHERE module_ident = c.documentid) AS ident_hash,
path || ARRAY[c.nodeid]
FROM trees AS c JOIN t ON (c.parent_id = t.nodeid)
WHERE not c.nodeid = ANY(t.path) AND c.is_collated = FALSE
)
SELECT row_to_json(row) FROM (SELECT * FROM t) AS row"""
tree_insert_sql = """\
INSERT INTO trees
(nodeid, parent_id,
documentid,
title, childorder, latest)
VALUES
(DEFAULT, %(parent_id)s,
(SELECT module_ident
FROM modules
WHERE ident_hash(uuid, major_version, minor_version) = \
%(ident_hash)s),
%(title)s, %(childorder)s, %(latest)s)
RETURNING nodeid"""
def get_tree():
cursor.execute(collection_tree_sql, (ident_hash,))
for row in cursor.fetchall():
yield row[0]
def insert(fields):
cursor.execute(tree_insert_sql, fields)
results = cursor.fetchone()[0]
return results
tree = {} # {<current-nodeid>: {<row-data>...}, ...}
children = {} # {<nodeid>: [<child-nodeid>, ...], <child-nodeid>: [...]}
for node in get_tree():
tree[node['nodeid']] = node
children.setdefault(node['parent_id'], [])
children[node['parent_id']].append(node['nodeid'])
def build_tree(nodeid, parent_id):
data = tree[nodeid]
data['parent_id'] = parent_id
if history_map.get(data['ident_hash']) is not None \
and (data['latest'] or parent_id is None):
data['ident_hash'] = history_map[data['ident_hash']]
new_nodeid = insert(data)
for child_nodeid in children.get(nodeid, []):
build_tree(child_nodeid, new_nodeid)
root_node = children[None][0]
build_tree(root_node, None) |
def publish(request):
"""Accept a publication request at form value 'epub'"""
if 'epub' not in request.POST:
raise httpexceptions.HTTPBadRequest("Missing EPUB in POST body.")
is_pre_publication = asbool(request.POST.get('pre-publication'))
epub_upload = request.POST['epub'].file
try:
epub = cnxepub.EPUB.from_file(epub_upload)
except: # noqa: E722
raise httpexceptions.HTTPBadRequest('Format not recognized.')
# Make a publication entry in the database for status checking
# the publication. This also creates publication entries for all
# of the content in the EPUB.
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
epub_upload.seek(0)
publication_id, publications = add_publication(
cursor, epub, epub_upload, is_pre_publication)
# Poke at the publication & lookup its state.
state, messages = poke_publication_state(publication_id)
response_data = {
'publication': publication_id,
'mapping': publications,
'state': state,
'messages': messages,
}
return response_data |
def get_publication(request):
"""Lookup publication state"""
publication_id = request.matchdict['id']
state, messages = check_publication_state(publication_id)
response_data = {
'publication': publication_id,
'state': state,
'messages': messages,
}
return response_data |
def get_accept_license(request):
"""This produces JSON data for a user (at ``uid``) to view the license(s)
they have accepted or will need to accept for a publication (at ``id``).
"""
publication_id = request.matchdict['id']
user_id = request.matchdict['uid']
# FIXME Is this an active publication?
# TODO Verify the accepting user is the one making the request.
# List the license acceptance state of each pending document.
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""
SELECT row_to_json(combined_rows) FROM (
SELECT
pd.uuid AS id,
ident_hash(pd.uuid, pd.major_version, pd.minor_version) \
AS ident_hash,
accepted AS is_accepted
FROM
pending_documents AS pd
NATURAL JOIN license_acceptances AS la
WHERE pd.publication_id = %s AND user_id = %s
) as combined_rows;""",
(publication_id, user_id))
user_documents = [r[0] for r in cursor.fetchall()]
return {'publication_id': publication_id,
'user_id': user_id,
'documents': user_documents,
} |
def post_accept_license(request):
"""Allows the user (at ``uid``) to accept the license(s) for
a publication (at ``id``).
"""
publication_id = request.matchdict['id']
uid = request.matchdict['uid']
# TODO Verify the accepting user is the one making the request.
# They could be authenticated but not be the license acceptor.
post_data = request.json
accepted = []
denied = []
try:
documents = post_data['documents']
for doc_acceptance in documents:
if doc_acceptance['is_accepted'] is None:
continue
elif doc_acceptance['is_accepted']:
accepted.append(doc_acceptance['id'])
else:
denied.append(doc_acceptance['id'])
except KeyError:
raise httpexceptions.HTTPBadRequest("Posted data is invalid.")
# For each pending document, accept/deny the license.
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
accept_publication_license(cursor, publication_id, uid,
accepted, True)
accept_publication_license(cursor, publication_id, uid,
denied, False)
location = request.route_url('publication-license-acceptance',
id=publication_id, uid=uid)
# Poke publication to change state.
poke_publication_state(publication_id)
return httpexceptions.HTTPFound(location=location) |
def bake_content(request):
"""Invoke the baking process - trigger post-publication"""
ident_hash = request.matchdict['ident_hash']
try:
id, version = split_ident_hash(ident_hash)
except IdentHashError:
raise httpexceptions.HTTPNotFound()
if not version:
raise httpexceptions.HTTPBadRequest('must specify the version')
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""\
SELECT bool(portal_type = 'Collection'), stateid, module_ident
FROM modules
WHERE ident_hash(uuid, major_version, minor_version) = %s
""", (ident_hash,))
try:
is_binder, stateid, module_ident = cursor.fetchone()
except TypeError:
raise httpexceptions.HTTPNotFound()
if not is_binder:
raise httpexceptions.HTTPBadRequest(
'{} is not a book'.format(ident_hash))
if stateid == 5:
cursor.execute("""\
SELECT pg_notify('post_publication',
'{"module_ident": '||%s||',
"ident_hash": "'||%s||'",
"timestamp": "'||CURRENT_TIMESTAMP||'"}')
""", (module_ident, ident_hash))
else:
cursor.execute("""\
UPDATE modules SET stateid = 5
WHERE ident_hash(uuid, major_version, minor_version) = %s
""", (ident_hash,)) |
def includeme(config):
"""Configures the caching manager"""
global cache_manager
settings = config.registry.settings
cache_manager = CacheManager(**parse_cache_config_options(settings)) |
def create_pg_notify_event(notif):
"""A factory for creating a Postgres Notification Event
(an object inheriting from `cnxpublishing.events.PGNotifyEvent`)
given `notif`, a `psycopg2.extensions.Notify` object.
"""
# TODO Lookup registered events via getAllUtilitiesRegisteredFor
# for class mapping.
if notif.channel not in _CHANNEL_MAPPER:
cls = _CHANNEL_MAPPER[None]
else:
cls = _CHANNEL_MAPPER[notif.channel]
return cls(notif) |
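# Standalone sketch of the fallback lookup above, with a hypothetical
# mapper: the None key holds the default event class used for channels
# that have no specific class registered. The class names are made up.
class _DefaultEvent(object):
    def __init__(self, notif):
        self.notif = notif

class _PostPublicationEvent(_DefaultEvent):
    pass

_CHANNEL_MAPPER = {'post_publication': _PostPublicationEvent,
                   None: _DefaultEvent}
cls = _CHANNEL_MAPPER.get('unmapped_channel', _CHANNEL_MAPPER[None])
assert cls is _DefaultEvent |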
def create_map(self, type_from, type_to, mapping=None):
"""Method for adding mapping definitions
:param type_from: source type
:param type_to: target type
:param mapping: dictionary of mapping definitions in the form
{'target_property_name': lambda source_object: ...}
:return: None
"""
key_from = type_from.__name__
key_to = type_to.__name__
if mapping is None:
mapping = {}
if key_from in self.mappings:
inner_map = self.mappings[key_from]
if key_to in inner_map:
raise ObjectMapperException("Mapping for {0} -> {1} already exists".format(key_from, key_to))
else:
inner_map[key_to] = mapping
else:
self.mappings[key_from] = {}
self.mappings[key_from][key_to] = mapping |
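# Hypothetical usage of create_map, assuming these methods live on an
# ObjectMapper class whose __init__ sets self.mappings = {}. The
# 'display_name' entry is computed from the source object; unmapped
# attributes fall back to name-based copying in map().
class User(object):
    def __init__(self):
        self.name = 'Jane'
        self.email = 'jane@example.com'

class UserDto(object):
    def __init__(self):
        self.name = ''
        self.display_name = ''

mapper = ObjectMapper()
mapper.create_map(User, UserDto,
                  {'display_name': lambda u: u.name.upper()}) |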
def map(self, from_obj, to_type, ignore_case=False, allow_none=False, excluded=None):
"""Method for creating target object instance
:param from_obj: source object to be mapped from
:param to_type: target type
:param ignore_case: if set to true, ignores attribute case when performing the mapping
:param allow_none: if set to true, returns None if the source object is None; otherwise throws an exception
:param excluded: A list of fields to exclude when performing the mapping
:return: Instance of the target class with mapped attributes
"""
if (from_obj is None) and allow_none:
return None
else:
# one of the tests is explicitly checking for an attribute error on __dict__ if it's not set
from_obj.__dict__
inst = to_type()
key_from = from_obj.__class__.__name__
key_to = to_type.__name__
def not_private(s):
return not s.startswith('_')
def not_excluded(s):
return not (excluded and s in excluded)
from_obj_attributes = getmembers(from_obj, lambda a: not isroutine(a))
from_obj_dict = {k: v for k, v in from_obj_attributes
if not_private(k) and not_excluded(k)}
to_obj_attributes = getmembers(inst, lambda a: not isroutine(a))
to_obj_dict = {k: v for k, v in to_obj_attributes if not_private(k)}
if ignore_case:
from_props = CaseDict(from_obj_dict)
to_props = CaseDict(to_obj_dict)
else:
from_props = from_obj_dict
to_props = to_obj_dict
for prop in to_props:
if self.mappings is not None \
and key_from in self.mappings \
and key_to in self.mappings[key_from]:
if prop in self.mappings[key_from][key_to]:
# take mapping function
try:
fnc = self.mappings[key_from][key_to][prop]
if fnc is not None:
setattr(inst, prop, fnc(from_obj))
# a None mapping function suppresses mapping for this property
except Exception:
raise ObjectMapperException("Invalid mapping function while setting property {0}.{1}".
format(inst.__class__.__name__, prop))
else:
# try to find a property with the same name in the source
if prop in from_props:
setattr(inst, prop, from_props[prop])
# case when target attribute is not mapped (can be extended)
else:
raise ObjectMapperException("No mapping defined for {0} -> {1}".format(key_from, key_to))
return inst |
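# Continuing the hypothetical example from create_map: map() copies 'name'
# by matching attribute names and computes 'display_name' from the lambda;
# the source-only 'email' attribute is ignored.
dto = mapper.map(User(), UserDto)
assert dto.name == 'Jane'
assert dto.display_name == 'JANE' |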
def get(self, key, default=_sentinel):
"""
Gets the value from the key.
If the key doesn't exist, the default value is returned if given, otherwise None.
:param key: The key
:param default: The default value
:return: The value
"""
tup = self._data.get(key.lower())
if tup is not None:
return tup[1]
elif default is not _sentinel:
return default
else:
return None |
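# A standalone mirror (hypothetical data) of the lookup logic: _data maps
# lower-cased keys to (original_key, value) tuples, and get() returns the
# stored value regardless of the case used in the lookup.
_data = {'content-type': ('Content-Type', 'text/html')}
tup = _data.get('CONTENT-TYPE'.lower())
assert tup[1] == 'text/html' |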
def pop(self, key, default=_sentinel):
"""
Removes the specified key and returns the corresponding value.
If key is not found, the default is returned if given, otherwise KeyError is raised.
:param key: The key
:param default: The default value
:return: The value
"""
if default is not _sentinel:
tup = self._data.pop(key.lower(), default)
else:
tup = self._data.pop(key.lower())
if tup is not default:
return tup[1]
else:
return default |
def reversals(series, left=False, right=False):
"""Iterate reversal points in the series.
A reversal point is a point in the series at which the first derivative
changes sign. Reversal is undefined at the first (last) point because the
derivative before (after) this point is undefined. The first and the last
points may be treated as reversals by setting the optional parameters
`left` and `right` to True.
Parameters
----------
series : iterable sequence of numbers
left: bool, optional
If True, yield the first point in the series (treat it as a reversal).
right: bool, optional
If True, yield the last point in the series (treat it as a reversal).
Yields
------
float
Reversal points.
"""
series = iter(series)
x_last, x = next(series), next(series)
d_last = (x - x_last)
if left:
yield x_last
for x_next in series:
if x_next == x:
continue
d_next = x_next - x
if d_last * d_next < 0:
yield x
x_last, x = x, x_next
d_last = d_next
if right:
yield x_next |
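# Runnable example: the series rises to 2, falls to -1, then rises again,
# so the interior reversals are 2 and -1; left/right add the endpoints.
assert list(reversals([0, 1, 2, -1, 3], left=True, right=True)) == \
    [0, 2, -1, 3]
assert list(reversals([0, 1, 2, -1, 3])) == [2, -1] |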
def _sort_lows_and_highs(func):
"Decorator for extract_cycles"
@functools.wraps(func)
def wrapper(*args, **kwargs):
for low, high, mult in func(*args, **kwargs):
if low < high:
yield low, high, mult
else:
yield high, low, mult
return wrapper |
def extract_cycles(series, left=False, right=False):
"""Iterate cycles in the series.
Parameters
----------
series : iterable sequence of numbers
left: bool, optional
If True, treat the first point in the series as a reversal.
right: bool, optional
If True, treat the last point in the series as a reversal.
Yields
------
cycle : tuple
Each tuple contains three floats (low, high, mult), where low and high
define cycle amplitude and mult equals to 1.0 for full cycles and 0.5
for half cycles.
"""
points = deque()
for x in reversals(series, left=left, right=right):
points.append(x)
while len(points) >= 3:
# Form ranges X and Y from the three most recent points
X = abs(points[-2] - points[-1])
Y = abs(points[-3] - points[-2])
if X < Y:
# Read the next point
break
elif len(points) == 3:
# Y contains the starting point
# Count Y as one-half cycle and discard the first point
yield points[0], points[1], 0.5
points.popleft()
else:
# Count Y as one cycle and discard the peak and the valley of Y
yield points[-3], points[-2], 1.0
last = points.pop()
points.pop()
points.pop()
points.append(last)
else:
# Count the remaining ranges as one-half cycles
while len(points) > 1:
yield points[0], points[1], 0.5
points.popleft() |
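# Runnable example on a classic rainflow test series. Note extract_cycles
# is shown undecorated here, so each (low, high) pair appears in detection
# order; _sort_lows_and_highs would reorder each pair as (low, high).
series = [-2, 1, -3, 5, -1, 3, -4, 4, -2]
cycles = list(extract_cycles(series, left=True, right=True))
assert cycles == [(-2, 1, 0.5), (1, -3, 0.5), (-1, 3, 1.0), (-3, 5, 0.5),
                  (5, -4, 0.5), (-4, 4, 0.5), (4, -2, 0.5)] |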