repo | path | func_name | original_string | language | sha | url | partition
---|---|---|---|---|---|---|---
bslatkin/dpxdt | dpxdt/server/work_queue.py | heartbeat | def heartbeat(queue_name, task_id, owner, message, index):
    """Sets the heartbeat status of the task and extends its lease.

    The task's lease is extended by the same amount as its last lease to
    ensure that any operations following the heartbeat will still hold the
    lock for the original lock period.

    Args:
        queue_name: Name of the queue the work item is on.
        task_id: ID of the task whose heartbeat is being set.
        owner: Who or what has the current lease on the task.
        message: Message to report as the task's current status.
        index: Number of this message in the sequence of messages from the
            current task owner, starting at zero. This lets the API receive
            heartbeats out of order, yet ensure that the most recent message
            is actually saved to the database. This requires the owner issuing
            heartbeat messages to issue heartbeat indexes sequentially.

    Returns:
        True if the heartbeat message was set, False if it is lower than the
        current heartbeat index.

    Raises:
        TaskDoesNotExistError if the task does not exist.
        LeaseExpiredError if the lease is no longer active.
        NotOwnerError if the specified owner no longer owns the task.
    """
    task = _get_task_with_policy(queue_name, task_id, owner)
    if task.heartbeat_number > index:
        return False
    task.heartbeat = message
    task.heartbeat_number = index
    # Extend the lease by the time of the last lease.
    now = datetime.datetime.utcnow()
    timeout_delta = task.eta - task.last_lease
    task.eta = now + timeout_delta
    task.last_lease = now
    db.session.add(task)
    signals.task_updated.send(app, task=task)
    return True | python
"""Sets the heartbeat status of the task and extends its lease.
The task's lease is extended by the same amount as its last lease to
ensure that any operations following the heartbeat will still hold the
lock for the original lock period.
Args:
queue_name: Name of the queue the work item is on.
task_id: ID of the task that is finished.
owner: Who or what has the current lease on the task.
message: Message to report as the task's current status.
index: Number of this message in the sequence of messages from the
current task owner, starting at zero. This lets the API receive
heartbeats out of order, yet ensure that the most recent message
is actually saved to the database. This requires the owner issuing
heartbeat messages to issue heartbeat indexes sequentially.
Returns:
True if the heartbeat message was set, False if it is lower than the
current heartbeat index.
Raises:
TaskDoesNotExistError if the task does not exist.
LeaseExpiredError if the lease is no longer active.
NotOwnerError if the specified owner no longer owns the task.
"""
task = _get_task_with_policy(queue_name, task_id, owner)
if task.heartbeat_number > index:
return False
task.heartbeat = message
task.heartbeat_number = index
# Extend the lease by the time of the last lease.
now = datetime.datetime.utcnow()
timeout_delta = task.eta - task.last_lease
task.eta = now + timeout_delta
task.last_lease = now
db.session.add(task)
signals.task_updated.send(app, task=task)
return True | [
"def",
"heartbeat",
"(",
"queue_name",
",",
"task_id",
",",
"owner",
",",
"message",
",",
"index",
")",
":",
"task",
"=",
"_get_task_with_policy",
"(",
"queue_name",
",",
"task_id",
",",
"owner",
")",
"if",
"task",
".",
"heartbeat_number",
">",
"index",
":",
"return",
"False",
"task",
".",
"heartbeat",
"=",
"message",
"task",
".",
"heartbeat_number",
"=",
"index",
"# Extend the lease by the time of the last lease.",
"now",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"timeout_delta",
"=",
"task",
".",
"eta",
"-",
"task",
".",
"last_lease",
"task",
".",
"eta",
"=",
"now",
"+",
"timeout_delta",
"task",
".",
"last_lease",
"=",
"now",
"db",
".",
"session",
".",
"add",
"(",
"task",
")",
"signals",
".",
"task_updated",
".",
"send",
"(",
"app",
",",
"task",
"=",
"task",
")",
"return",
"True"
]
| Sets the heartbeat status of the task and extends its lease.
The task's lease is extended by the same amount as its last lease to
ensure that any operations following the heartbeat will still hold the
lock for the original lock period.
Args:
queue_name: Name of the queue the work item is on.
task_id: ID of the task that is finished.
owner: Who or what has the current lease on the task.
message: Message to report as the task's current status.
index: Number of this message in the sequence of messages from the
current task owner, starting at zero. This lets the API receive
heartbeats out of order, yet ensure that the most recent message
is actually saved to the database. This requires the owner issuing
heartbeat messages to issue heartbeat indexes sequentially.
Returns:
True if the heartbeat message was set, False if it is lower than the
current heartbeat index.
Raises:
TaskDoesNotExistError if the task does not exist.
LeaseExpiredError if the lease is no longer active.
NotOwnerError if the specified owner no longer owns the task. | [
"Sets",
"the",
"heartbeat",
"status",
"of",
"the",
"task",
"and",
"extends",
"its",
"lease",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/work_queue.py#L259-L303 | train |
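A minimal usage sketch of heartbeat() and its out-of-order index handling; the queue name, task id, and owner below are hypothetical, and an application context with a configured db session is assumed:

# Hypothetical identifiers, for illustration only.
work_queue.heartbeat('run-pdiff', 'task-123', 'worker-1', 'Step 2 of 3', index=1)
# A message with a lower index than the stored one is ignored:
assert work_queue.heartbeat('run-pdiff', 'task-123', 'worker-1', 'Step 1 of 3', index=0) is False
db.session.commit()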
bslatkin/dpxdt | dpxdt/server/work_queue.py | finish | def finish(queue_name, task_id, owner, error=False):
    """Marks a work item on a queue as finished.

    Args:
        queue_name: Name of the queue the work item is on.
        task_id: ID of the task that is finished.
        owner: Who or what has the current lease on the task.
        error: Defaults to false. True if this task's final state is an error.

    Returns:
        True if the task has been finished for the first time; False if the
        task was already finished.

    Raises:
        TaskDoesNotExistError if the task does not exist.
        LeaseExpiredError if the lease is no longer active.
        NotOwnerError if the specified owner no longer owns the task.
    """
    task = _get_task_with_policy(queue_name, task_id, owner)
    if task.status != WorkQueue.LIVE:
        logging.warning('Finishing already dead task. queue=%r, task_id=%r, '
                        'owner=%r, status=%r',
                        task.queue_name, task_id, owner, task.status)
        return False
    if not error:
        task.status = WorkQueue.DONE
    else:
        task.status = WorkQueue.ERROR
    task.finished = datetime.datetime.utcnow()
    db.session.add(task)
    signals.task_updated.send(app, task=task)
    return True | python
"""Marks a work item on a queue as finished.
Args:
queue_name: Name of the queue the work item is on.
task_id: ID of the task that is finished.
owner: Who or what has the current lease on the task.
error: Defaults to false. True if this task's final state is an error.
Returns:
True if the task has been finished for the first time; False if the
task was already finished.
Raises:
TaskDoesNotExistError if the task does not exist.
LeaseExpiredError if the lease is no longer active.
NotOwnerError if the specified owner no longer owns the task.
"""
task = _get_task_with_policy(queue_name, task_id, owner)
if not task.status == WorkQueue.LIVE:
logging.warning('Finishing already dead task. queue=%r, task_id=%r, '
'owner=%r, status=%r',
task.queue_name, task_id, owner, task.status)
return False
if not error:
task.status = WorkQueue.DONE
else:
task.status = WorkQueue.ERROR
task.finished = datetime.datetime.utcnow()
db.session.add(task)
signals.task_updated.send(app, task=task)
return True | [
"def",
"finish",
"(",
"queue_name",
",",
"task_id",
",",
"owner",
",",
"error",
"=",
"False",
")",
":",
"task",
"=",
"_get_task_with_policy",
"(",
"queue_name",
",",
"task_id",
",",
"owner",
")",
"if",
"not",
"task",
".",
"status",
"==",
"WorkQueue",
".",
"LIVE",
":",
"logging",
".",
"warning",
"(",
"'Finishing already dead task. queue=%r, task_id=%r, '",
"'owner=%r, status=%r'",
",",
"task",
".",
"queue_name",
",",
"task_id",
",",
"owner",
",",
"task",
".",
"status",
")",
"return",
"False",
"if",
"not",
"error",
":",
"task",
".",
"status",
"=",
"WorkQueue",
".",
"DONE",
"else",
":",
"task",
".",
"status",
"=",
"WorkQueue",
".",
"ERROR",
"task",
".",
"finished",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"db",
".",
"session",
".",
"add",
"(",
"task",
")",
"signals",
".",
"task_updated",
".",
"send",
"(",
"app",
",",
"task",
"=",
"task",
")",
"return",
"True"
]
| Marks a work item on a queue as finished.
Args:
queue_name: Name of the queue the work item is on.
task_id: ID of the task that is finished.
owner: Who or what has the current lease on the task.
error: Defaults to false. True if this task's final state is an error.
Returns:
True if the task has been finished for the first time; False if the
task was already finished.
Raises:
TaskDoesNotExistError if the task does not exist.
LeaseExpiredError if the lease is no longer active.
NotOwnerError if the specified owner no longer owns the task. | [
"Marks",
"a",
"work",
"item",
"on",
"a",
"queue",
"as",
"finished",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/work_queue.py#L306-L342 | train |
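A hedged sketch of a worker reporting completion with finish(); the identifiers and the do_work() helper are hypothetical:

try:
    do_work()  # hypothetical unit of work
    work_queue.finish('run-pdiff', 'task-123', 'worker-1')
except Exception:
    work_queue.finish('run-pdiff', 'task-123', 'worker-1', error=True)
db.session.commit()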
bslatkin/dpxdt | dpxdt/server/work_queue.py | cancel | def cancel(**kwargs):
    """Cancels work items based on their criteria.

    Args:
        **kwargs: Same parameters as the query() method.

    Returns:
        The number of tasks that were canceled.
    """
    task_list = _query(**kwargs)
    for task in task_list:
        task.status = WorkQueue.CANCELED
        task.finished = datetime.datetime.utcnow()
        db.session.add(task)
    return len(task_list) | python
"""Cancels work items based on their criteria.
Args:
**kwargs: Same parameters as the query() method.
Returns:
The number of tasks that were canceled.
"""
task_list = _query(**kwargs)
for task in task_list:
task.status = WorkQueue.CANCELED
task.finished = datetime.datetime.utcnow()
db.session.add(task)
return len(task_list) | [
"def",
"cancel",
"(",
"*",
"*",
"kwargs",
")",
":",
"task_list",
"=",
"_query",
"(",
"*",
"*",
"kwargs",
")",
"for",
"task",
"in",
"task_list",
":",
"task",
".",
"status",
"=",
"WorkQueue",
".",
"CANCELED",
"task",
".",
"finished",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"db",
".",
"session",
".",
"add",
"(",
"task",
")",
"return",
"len",
"(",
"task_list",
")"
]
| Cancels work items based on their criteria.
Args:
**kwargs: Same parameters as the query() method.
Returns:
The number of tasks that were canceled. | [
"Cancels",
"work",
"items",
"based",
"on",
"their",
"criteria",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/work_queue.py#L410-L424 | train |
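For illustration, a hedged call to cancel(); the keyword filters assume query() accepts queue_name and task_id, which is not shown in this excerpt:

# Assumed keyword filters; see query() for the real accepted parameters.
canceled = work_queue.cancel(queue_name='run-pdiff', task_id='task-123')
db.session.commit()
logging.info('Canceled %d task(s)', canceled)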
bslatkin/dpxdt | dpxdt/server/work_queue_handlers.py | handle_add | def handle_add(queue_name):
    """Adds a task to a queue."""
    source = request.form.get('source', request.remote_addr, type=str)
    try:
        task_id = work_queue.add(
            queue_name,
            payload=request.form.get('payload', type=str),
            content_type=request.form.get('content_type', type=str),
            source=source,
            task_id=request.form.get('task_id', type=str))
    except work_queue.Error, e:
        return utils.jsonify_error(e)
    db.session.commit()
    logging.info('Task added: queue=%r, task_id=%r, source=%r',
                 queue_name, task_id, source)
    return flask.jsonify(task_id=task_id) | python
"""Adds a task to a queue."""
source = request.form.get('source', request.remote_addr, type=str)
try:
task_id = work_queue.add(
queue_name,
payload=request.form.get('payload', type=str),
content_type=request.form.get('content_type', type=str),
source=source,
task_id=request.form.get('task_id', type=str))
except work_queue.Error, e:
return utils.jsonify_error(e)
db.session.commit()
logging.info('Task added: queue=%r, task_id=%r, source=%r',
queue_name, task_id, source)
return flask.jsonify(task_id=task_id) | [
"def",
"handle_add",
"(",
"queue_name",
")",
":",
"source",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'source'",
",",
"request",
".",
"remote_addr",
",",
"type",
"=",
"str",
")",
"try",
":",
"task_id",
"=",
"work_queue",
".",
"add",
"(",
"queue_name",
",",
"payload",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'payload'",
",",
"type",
"=",
"str",
")",
",",
"content_type",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'content_type'",
",",
"type",
"=",
"str",
")",
",",
"source",
"=",
"source",
",",
"task_id",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'task_id'",
",",
"type",
"=",
"str",
")",
")",
"except",
"work_queue",
".",
"Error",
",",
"e",
":",
"return",
"utils",
".",
"jsonify_error",
"(",
"e",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"logging",
".",
"info",
"(",
"'Task added: queue=%r, task_id=%r, source=%r'",
",",
"queue_name",
",",
"task_id",
",",
"source",
")",
"return",
"flask",
".",
"jsonify",
"(",
"task_id",
"=",
"task_id",
")"
]
| Adds a task to a queue. | [
"Adds",
"a",
"task",
"to",
"a",
"queue",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/work_queue_handlers.py#L37-L53 | train |
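A hedged client-side sketch of posting to this handler; the route /api/work_queue/<queue_name>/add is an assumption, since the URL rule is not included in this excerpt:

import requests  # third-party HTTP client, used here for brevity

resp = requests.post(
    'http://localhost:5000/api/work_queue/run-pdiff/add',  # hypothetical URL
    data={'payload': '{"build_id": 1}', 'content_type': 'application/json'})
print resp.json()['task_id']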
bslatkin/dpxdt | dpxdt/server/work_queue_handlers.py | handle_lease | def handle_lease(queue_name):
    """Leases a task from a queue."""
    owner = request.form.get('owner', request.remote_addr, type=str)
    try:
        task_list = work_queue.lease(
            queue_name,
            owner,
            request.form.get('count', 1, type=int),
            request.form.get('timeout', 60, type=int))
    except work_queue.Error, e:
        return utils.jsonify_error(e)
    if not task_list:
        return flask.jsonify(tasks=[])
    db.session.commit()
    task_ids = [t['task_id'] for t in task_list]
    logging.debug('Task leased: queue=%r, task_ids=%r, owner=%r',
                  queue_name, task_ids, owner)
    return flask.jsonify(tasks=task_list) | python
"""Leases a task from a queue."""
owner = request.form.get('owner', request.remote_addr, type=str)
try:
task_list = work_queue.lease(
queue_name,
owner,
request.form.get('count', 1, type=int),
request.form.get('timeout', 60, type=int))
except work_queue.Error, e:
return utils.jsonify_error(e)
if not task_list:
return flask.jsonify(tasks=[])
db.session.commit()
task_ids = [t['task_id'] for t in task_list]
logging.debug('Task leased: queue=%r, task_ids=%r, owner=%r',
queue_name, task_ids, owner)
return flask.jsonify(tasks=task_list) | [
"def",
"handle_lease",
"(",
"queue_name",
")",
":",
"owner",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'owner'",
",",
"request",
".",
"remote_addr",
",",
"type",
"=",
"str",
")",
"try",
":",
"task_list",
"=",
"work_queue",
".",
"lease",
"(",
"queue_name",
",",
"owner",
",",
"request",
".",
"form",
".",
"get",
"(",
"'count'",
",",
"1",
",",
"type",
"=",
"int",
")",
",",
"request",
".",
"form",
".",
"get",
"(",
"'timeout'",
",",
"60",
",",
"type",
"=",
"int",
")",
")",
"except",
"work_queue",
".",
"Error",
",",
"e",
":",
"return",
"utils",
".",
"jsonify_error",
"(",
"e",
")",
"if",
"not",
"task_list",
":",
"return",
"flask",
".",
"jsonify",
"(",
"tasks",
"=",
"[",
"]",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"task_ids",
"=",
"[",
"t",
"[",
"'task_id'",
"]",
"for",
"t",
"in",
"task_list",
"]",
"logging",
".",
"debug",
"(",
"'Task leased: queue=%r, task_ids=%r, owner=%r'",
",",
"queue_name",
",",
"task_ids",
",",
"owner",
")",
"return",
"flask",
".",
"jsonify",
"(",
"tasks",
"=",
"task_list",
")"
]
| Leases a task from a queue. | [
"Leases",
"a",
"task",
"from",
"a",
"queue",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/work_queue_handlers.py#L59-L78 | train |
bslatkin/dpxdt | dpxdt/server/work_queue_handlers.py | handle_heartbeat | def handle_heartbeat(queue_name):
    """Updates the heartbeat message for a task."""
    task_id = request.form.get('task_id', type=str)
    message = request.form.get('message', type=str)
    index = request.form.get('index', type=int)
    try:
        work_queue.heartbeat(
            queue_name,
            task_id,
            request.form.get('owner', request.remote_addr, type=str),
            message,
            index)
    except work_queue.Error, e:
        return utils.jsonify_error(e)
    db.session.commit()
    logging.debug('Task heartbeat: queue=%r, task_id=%r, message=%r, index=%d',
                  queue_name, task_id, message, index)
    return flask.jsonify(success=True) | python
"""Updates the heartbeat message for a task."""
task_id = request.form.get('task_id', type=str)
message = request.form.get('message', type=str)
index = request.form.get('index', type=int)
try:
work_queue.heartbeat(
queue_name,
task_id,
request.form.get('owner', request.remote_addr, type=str),
message,
index)
except work_queue.Error, e:
return utils.jsonify_error(e)
db.session.commit()
logging.debug('Task heartbeat: queue=%r, task_id=%r, message=%r, index=%d',
queue_name, task_id, message, index)
return flask.jsonify(success=True) | [
"def",
"handle_heartbeat",
"(",
"queue_name",
")",
":",
"task_id",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'task_id'",
",",
"type",
"=",
"str",
")",
"message",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'message'",
",",
"type",
"=",
"str",
")",
"index",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'index'",
",",
"type",
"=",
"int",
")",
"try",
":",
"work_queue",
".",
"heartbeat",
"(",
"queue_name",
",",
"task_id",
",",
"request",
".",
"form",
".",
"get",
"(",
"'owner'",
",",
"request",
".",
"remote_addr",
",",
"type",
"=",
"str",
")",
",",
"message",
",",
"index",
")",
"except",
"work_queue",
".",
"Error",
",",
"e",
":",
"return",
"utils",
".",
"jsonify_error",
"(",
"e",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"logging",
".",
"debug",
"(",
"'Task heartbeat: queue=%r, task_id=%r, message=%r, index=%d'",
",",
"queue_name",
",",
"task_id",
",",
"message",
",",
"index",
")",
"return",
"flask",
".",
"jsonify",
"(",
"success",
"=",
"True",
")"
]
| Updates the heartbeat message for a task. | [
"Updates",
"the",
"heartbeat",
"message",
"for",
"a",
"task",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/work_queue_handlers.py#L84-L102 | train |
bslatkin/dpxdt | dpxdt/server/work_queue_handlers.py | handle_finish | def handle_finish(queue_name):
    """Marks a task on a queue as finished."""
    task_id = request.form.get('task_id', type=str)
    owner = request.form.get('owner', request.remote_addr, type=str)
    error = request.form.get('error', type=str) is not None
    try:
        work_queue.finish(queue_name, task_id, owner, error=error)
    except work_queue.Error, e:
        return utils.jsonify_error(e)
    db.session.commit()
    logging.debug('Task finished: queue=%r, task_id=%r, owner=%r, error=%r',
                  queue_name, task_id, owner, error)
    return flask.jsonify(success=True) | python
"""Marks a task on a queue as finished."""
task_id = request.form.get('task_id', type=str)
owner = request.form.get('owner', request.remote_addr, type=str)
error = request.form.get('error', type=str) is not None
try:
work_queue.finish(queue_name, task_id, owner, error=error)
except work_queue.Error, e:
return utils.jsonify_error(e)
db.session.commit()
logging.debug('Task finished: queue=%r, task_id=%r, owner=%r, error=%r',
queue_name, task_id, owner, error)
return flask.jsonify(success=True) | [
"def",
"handle_finish",
"(",
"queue_name",
")",
":",
"task_id",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'task_id'",
",",
"type",
"=",
"str",
")",
"owner",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'owner'",
",",
"request",
".",
"remote_addr",
",",
"type",
"=",
"str",
")",
"error",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'error'",
",",
"type",
"=",
"str",
")",
"is",
"not",
"None",
"try",
":",
"work_queue",
".",
"finish",
"(",
"queue_name",
",",
"task_id",
",",
"owner",
",",
"error",
"=",
"error",
")",
"except",
"work_queue",
".",
"Error",
",",
"e",
":",
"return",
"utils",
".",
"jsonify_error",
"(",
"e",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"logging",
".",
"debug",
"(",
"'Task finished: queue=%r, task_id=%r, owner=%r, error=%r'",
",",
"queue_name",
",",
"task_id",
",",
"owner",
",",
"error",
")",
"return",
"flask",
".",
"jsonify",
"(",
"success",
"=",
"True",
")"
]
| Marks a task on a queue as finished. | [
"Marks",
"a",
"task",
"on",
"a",
"queue",
"as",
"finished",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/work_queue_handlers.py#L108-L121 | train |
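Together, handle_add, handle_lease, handle_heartbeat, and handle_finish form a small HTTP work-queue protocol. A hedged worker-loop sketch; the form fields match the handlers above, but the URL prefix is an assumption:

import time
import requests  # third-party HTTP client

BASE = 'http://localhost:5000/api/work_queue/run-pdiff'  # hypothetical prefix
while True:
    tasks = requests.post(BASE + '/lease', data={'owner': 'worker-1'}).json()['tasks']
    if not tasks:
        time.sleep(5)
        continue
    task = tasks[0]
    requests.post(BASE + '/heartbeat', data={
        'task_id': task['task_id'], 'owner': 'worker-1',
        'message': 'Working ...', 'index': 0})
    # ... do the actual work here ...
    requests.post(BASE + '/finish', data={
        'task_id': task['task_id'], 'owner': 'worker-1'})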
bslatkin/dpxdt | dpxdt/server/work_queue_handlers.py | view_all_work_queues | def view_all_work_queues():
    """Page for viewing the index of all active work queues."""
    count_list = list(
        db.session.query(
            work_queue.WorkQueue.queue_name,
            work_queue.WorkQueue.status,
            func.count(work_queue.WorkQueue.task_id))
        .group_by(work_queue.WorkQueue.queue_name,
                  work_queue.WorkQueue.status))
    queue_dict = {}
    for name, status, count in count_list:
        queue_dict[(name, status)] = dict(
            name=name, status=status, count=count)
    max_created_list = list(
        db.session.query(
            work_queue.WorkQueue.queue_name,
            work_queue.WorkQueue.status,
            func.max(work_queue.WorkQueue.created))
        .group_by(work_queue.WorkQueue.queue_name,
                  work_queue.WorkQueue.status))
    for name, status, newest_created in max_created_list:
        queue_dict[(name, status)]['newest_created'] = newest_created
    min_eta_list = list(
        db.session.query(
            work_queue.WorkQueue.queue_name,
            work_queue.WorkQueue.status,
            func.min(work_queue.WorkQueue.eta))
        .group_by(work_queue.WorkQueue.queue_name,
                  work_queue.WorkQueue.status))
    for name, status, oldest_eta in min_eta_list:
        queue_dict[(name, status)]['oldest_eta'] = oldest_eta
    queue_list = list(queue_dict.values())
    queue_list.sort(key=lambda x: (x['name'], x['status']))
    context = dict(
        queue_list=queue_list,
    )
    return render_template('view_work_queue_index.html', **context) | python
"""Page for viewing the index of all active work queues."""
count_list = list(
db.session.query(
work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status,
func.count(work_queue.WorkQueue.task_id))
.group_by(work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status))
queue_dict = {}
for name, status, count in count_list:
queue_dict[(name, status)] = dict(
name=name, status=status, count=count)
max_created_list = list(
db.session.query(
work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status,
func.max(work_queue.WorkQueue.created))
.group_by(work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status))
for name, status, newest_created in max_created_list:
queue_dict[(name, status)]['newest_created'] = newest_created
min_eta_list = list(
db.session.query(
work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status,
func.min(work_queue.WorkQueue.eta))
.group_by(work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status))
for name, status, oldest_eta in min_eta_list:
queue_dict[(name, status)]['oldest_eta'] = oldest_eta
queue_list = list(queue_dict.values())
queue_list.sort(key=lambda x: (x['name'], x['status']))
context = dict(
queue_list=queue_list,
)
return render_template('view_work_queue_index.html', **context) | [
"def",
"view_all_work_queues",
"(",
")",
":",
"count_list",
"=",
"list",
"(",
"db",
".",
"session",
".",
"query",
"(",
"work_queue",
".",
"WorkQueue",
".",
"queue_name",
",",
"work_queue",
".",
"WorkQueue",
".",
"status",
",",
"func",
".",
"count",
"(",
"work_queue",
".",
"WorkQueue",
".",
"task_id",
")",
")",
".",
"group_by",
"(",
"work_queue",
".",
"WorkQueue",
".",
"queue_name",
",",
"work_queue",
".",
"WorkQueue",
".",
"status",
")",
")",
"queue_dict",
"=",
"{",
"}",
"for",
"name",
",",
"status",
",",
"count",
"in",
"count_list",
":",
"queue_dict",
"[",
"(",
"name",
",",
"status",
")",
"]",
"=",
"dict",
"(",
"name",
"=",
"name",
",",
"status",
"=",
"status",
",",
"count",
"=",
"count",
")",
"max_created_list",
"=",
"list",
"(",
"db",
".",
"session",
".",
"query",
"(",
"work_queue",
".",
"WorkQueue",
".",
"queue_name",
",",
"work_queue",
".",
"WorkQueue",
".",
"status",
",",
"func",
".",
"max",
"(",
"work_queue",
".",
"WorkQueue",
".",
"created",
")",
")",
".",
"group_by",
"(",
"work_queue",
".",
"WorkQueue",
".",
"queue_name",
",",
"work_queue",
".",
"WorkQueue",
".",
"status",
")",
")",
"for",
"name",
",",
"status",
",",
"newest_created",
"in",
"max_created_list",
":",
"queue_dict",
"[",
"(",
"name",
",",
"status",
")",
"]",
"[",
"'newest_created'",
"]",
"=",
"newest_created",
"min_eta_list",
"=",
"list",
"(",
"db",
".",
"session",
".",
"query",
"(",
"work_queue",
".",
"WorkQueue",
".",
"queue_name",
",",
"work_queue",
".",
"WorkQueue",
".",
"status",
",",
"func",
".",
"min",
"(",
"work_queue",
".",
"WorkQueue",
".",
"eta",
")",
")",
".",
"group_by",
"(",
"work_queue",
".",
"WorkQueue",
".",
"queue_name",
",",
"work_queue",
".",
"WorkQueue",
".",
"status",
")",
")",
"for",
"name",
",",
"status",
",",
"oldest_eta",
"in",
"min_eta_list",
":",
"queue_dict",
"[",
"(",
"name",
",",
"status",
")",
"]",
"[",
"'oldest_eta'",
"]",
"=",
"oldest_eta",
"queue_list",
"=",
"list",
"(",
"queue_dict",
".",
"values",
"(",
")",
")",
"queue_list",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"(",
"x",
"[",
"'name'",
"]",
",",
"x",
"[",
"'status'",
"]",
")",
")",
"context",
"=",
"dict",
"(",
"queue_list",
"=",
"queue_list",
",",
")",
"return",
"render_template",
"(",
"'view_work_queue_index.html'",
",",
"*",
"*",
"context",
")"
]
| Page for viewing the index of all active work queues. | [
"Page",
"for",
"viewing",
"the",
"index",
"of",
"all",
"active",
"work",
"queues",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/work_queue_handlers.py#L126-L169 | train |
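The view above issues three group-by queries and stitches the results together in Python. For comparison, a hedged sketch of computing all three aggregates in one query (an alternative, not the code the repo uses):

# One query computing all three aggregates per (queue_name, status) group.
rows = (
    db.session.query(
        work_queue.WorkQueue.queue_name,
        work_queue.WorkQueue.status,
        func.count(work_queue.WorkQueue.task_id),
        func.max(work_queue.WorkQueue.created),
        func.min(work_queue.WorkQueue.eta))
    .group_by(work_queue.WorkQueue.queue_name,
              work_queue.WorkQueue.status))
queue_list = sorted(
    (dict(name=n, status=s, count=c, newest_created=mc, oldest_eta=me)
     for n, s, c, mc, me in rows),
    key=lambda x: (x['name'], x['status']))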
bslatkin/dpxdt | dpxdt/server/work_queue_handlers.py | manage_work_queue | def manage_work_queue(queue_name):
    """Page for viewing the contents of a work queue."""
    modify_form = forms.ModifyWorkQueueTaskForm()
    if modify_form.validate_on_submit():
        primary_key = (modify_form.task_id.data, queue_name)
        task = work_queue.WorkQueue.query.get(primary_key)
        if task:
            logging.info('Action: %s task_id=%r',
                         modify_form.action.data, modify_form.task_id.data)
            if modify_form.action.data == 'retry':
                task.status = work_queue.WorkQueue.LIVE
                task.lease_attempts = 0
                task.heartbeat = 'Retrying ...'
                db.session.add(task)
            else:
                db.session.delete(task)
            db.session.commit()
        else:
            logging.warning('Could not find task_id=%r to delete',
                            modify_form.task_id.data)
        return redirect(url_for('manage_work_queue', queue_name=queue_name))
    query = (
        work_queue.WorkQueue.query
        .filter_by(queue_name=queue_name)
        .order_by(work_queue.WorkQueue.created.desc()))
    status = request.args.get('status', '', type=str).lower()
    if status in work_queue.WorkQueue.STATES:
        query = query.filter_by(status=status)
    else:
        status = None
    item_list = list(query.limit(100))
    work_list = []
    for item in item_list:
        form = forms.ModifyWorkQueueTaskForm()
        form.task_id.data = item.task_id
        form.delete.data = True
        work_list.append((item, form))
    context = dict(
        queue_name=queue_name,
        status=status,
        work_list=work_list,
    )
    return render_template('view_work_queue.html', **context) | python
"""Page for viewing the contents of a work queue."""
modify_form = forms.ModifyWorkQueueTaskForm()
if modify_form.validate_on_submit():
primary_key = (modify_form.task_id.data, queue_name)
task = work_queue.WorkQueue.query.get(primary_key)
if task:
logging.info('Action: %s task_id=%r',
modify_form.action.data, modify_form.task_id.data)
if modify_form.action.data == 'retry':
task.status = work_queue.WorkQueue.LIVE
task.lease_attempts = 0
task.heartbeat = 'Retrying ...'
db.session.add(task)
else:
db.session.delete(task)
db.session.commit()
else:
logging.warning('Could not find task_id=%r to delete',
modify_form.task_id.data)
return redirect(url_for('manage_work_queue', queue_name=queue_name))
query = (
work_queue.WorkQueue.query
.filter_by(queue_name=queue_name)
.order_by(work_queue.WorkQueue.created.desc()))
status = request.args.get('status', '', type=str).lower()
if status in work_queue.WorkQueue.STATES:
query = query.filter_by(status=status)
else:
status = None
item_list = list(query.limit(100))
work_list = []
for item in item_list:
form = forms.ModifyWorkQueueTaskForm()
form.task_id.data = item.task_id
form.delete.data = True
work_list.append((item, form))
context = dict(
queue_name=queue_name,
status=status,
work_list=work_list,
)
return render_template('view_work_queue.html', **context) | [
"def",
"manage_work_queue",
"(",
"queue_name",
")",
":",
"modify_form",
"=",
"forms",
".",
"ModifyWorkQueueTaskForm",
"(",
")",
"if",
"modify_form",
".",
"validate_on_submit",
"(",
")",
":",
"primary_key",
"=",
"(",
"modify_form",
".",
"task_id",
".",
"data",
",",
"queue_name",
")",
"task",
"=",
"work_queue",
".",
"WorkQueue",
".",
"query",
".",
"get",
"(",
"primary_key",
")",
"if",
"task",
":",
"logging",
".",
"info",
"(",
"'Action: %s task_id=%r'",
",",
"modify_form",
".",
"action",
".",
"data",
",",
"modify_form",
".",
"task_id",
".",
"data",
")",
"if",
"modify_form",
".",
"action",
".",
"data",
"==",
"'retry'",
":",
"task",
".",
"status",
"=",
"work_queue",
".",
"WorkQueue",
".",
"LIVE",
"task",
".",
"lease_attempts",
"=",
"0",
"task",
".",
"heartbeat",
"=",
"'Retrying ...'",
"db",
".",
"session",
".",
"add",
"(",
"task",
")",
"else",
":",
"db",
".",
"session",
".",
"delete",
"(",
"task",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"else",
":",
"logging",
".",
"warning",
"(",
"'Could not find task_id=%r to delete'",
",",
"modify_form",
".",
"task_id",
".",
"data",
")",
"return",
"redirect",
"(",
"url_for",
"(",
"'manage_work_queue'",
",",
"queue_name",
"=",
"queue_name",
")",
")",
"query",
"=",
"(",
"work_queue",
".",
"WorkQueue",
".",
"query",
".",
"filter_by",
"(",
"queue_name",
"=",
"queue_name",
")",
".",
"order_by",
"(",
"work_queue",
".",
"WorkQueue",
".",
"created",
".",
"desc",
"(",
")",
")",
")",
"status",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'status'",
",",
"''",
",",
"type",
"=",
"str",
")",
".",
"lower",
"(",
")",
"if",
"status",
"in",
"work_queue",
".",
"WorkQueue",
".",
"STATES",
":",
"query",
"=",
"query",
".",
"filter_by",
"(",
"status",
"=",
"status",
")",
"else",
":",
"status",
"=",
"None",
"item_list",
"=",
"list",
"(",
"query",
".",
"limit",
"(",
"100",
")",
")",
"work_list",
"=",
"[",
"]",
"for",
"item",
"in",
"item_list",
":",
"form",
"=",
"forms",
".",
"ModifyWorkQueueTaskForm",
"(",
")",
"form",
".",
"task_id",
".",
"data",
"=",
"item",
".",
"task_id",
"form",
".",
"delete",
".",
"data",
"=",
"True",
"work_list",
".",
"append",
"(",
"(",
"item",
",",
"form",
")",
")",
"context",
"=",
"dict",
"(",
"queue_name",
"=",
"queue_name",
",",
"status",
"=",
"status",
",",
"work_list",
"=",
"work_list",
",",
")",
"return",
"render_template",
"(",
"'view_work_queue.html'",
",",
"*",
"*",
"context",
")"
]
| Page for viewing the contents of a work queue. | [
"Page",
"for",
"viewing",
"the",
"contents",
"of",
"a",
"work",
"queue",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/work_queue_handlers.py#L174-L220 | train |
bslatkin/dpxdt | dpxdt/server/utils.py | retryable_transaction | def retryable_transaction(attempts=3, exceptions=(OperationalError,)):
    """Decorator retries a function when expected exceptions are raised."""
    assert len(exceptions) > 0
    assert attempts > 0
    def wrapper(f):
        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            for i in xrange(attempts):
                try:
                    return f(*args, **kwargs)
                except exceptions, e:
                    if i == (attempts - 1):
                        raise
                    logging.warning(
                        'Retryable error in transaction on attempt %d. %s: %s',
                        i + 1, e.__class__.__name__, e)
                    db.session.rollback()
        return wrapped
    return wrapper | python
"""Decorator retries a function when expected exceptions are raised."""
assert len(exceptions) > 0
assert attempts > 0
def wrapper(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
for i in xrange(attempts):
try:
return f(*args, **kwargs)
except exceptions, e:
if i == (attempts - 1):
raise
logging.warning(
'Retryable error in transaction on attempt %d. %s: %s',
i + 1, e.__class__.__name__, e)
db.session.rollback()
return wrapped
return wrapper | [
"def",
"retryable_transaction",
"(",
"attempts",
"=",
"3",
",",
"exceptions",
"=",
"(",
"OperationalError",
",",
")",
")",
":",
"assert",
"len",
"(",
"exceptions",
")",
">",
"0",
"assert",
"attempts",
">",
"0",
"def",
"wrapper",
"(",
"f",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"i",
"in",
"xrange",
"(",
"attempts",
")",
":",
"try",
":",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"exceptions",
",",
"e",
":",
"if",
"i",
"==",
"(",
"attempts",
"-",
"1",
")",
":",
"raise",
"logging",
".",
"warning",
"(",
"'Retryable error in transaction on attempt %d. %s: %s'",
",",
"i",
"+",
"1",
",",
"e",
".",
"__class__",
".",
"__name__",
",",
"e",
")",
"db",
".",
"session",
".",
"rollback",
"(",
")",
"return",
"wrapped",
"return",
"wrapper"
]
| Decorator retries a function when expected exceptions are raised. | [
"Decorator",
"retries",
"a",
"function",
"when",
"expected",
"exceptions",
"are",
"raised",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/utils.py#L37-L58 | train |
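A hedged usage sketch of the decorator above; commit_report() and its body are hypothetical:

@retryable_transaction(attempts=5)
def commit_report(report):  # hypothetical function, for illustration
    db.session.add(report)
    db.session.commit()  # an OperationalError here is rolled back and retried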
bslatkin/dpxdt | dpxdt/server/utils.py | jsonify_assert | def jsonify_assert(asserted, message, status_code=400):
    """Asserts something is true, aborts the request if not."""
    if asserted:
        return
    try:
        raise AssertionError(message)
    except AssertionError, e:
        stack = traceback.extract_stack()
        stack.pop()
        logging.error('Assertion failed: %s\n%s',
                      str(e), ''.join(traceback.format_list(stack)))
        abort(jsonify_error(e, status_code=status_code)) | python
"""Asserts something is true, aborts the request if not."""
if asserted:
return
try:
raise AssertionError(message)
except AssertionError, e:
stack = traceback.extract_stack()
stack.pop()
logging.error('Assertion failed: %s\n%s',
str(e), ''.join(traceback.format_list(stack)))
abort(jsonify_error(e, status_code=status_code)) | [
"def",
"jsonify_assert",
"(",
"asserted",
",",
"message",
",",
"status_code",
"=",
"400",
")",
":",
"if",
"asserted",
":",
"return",
"try",
":",
"raise",
"AssertionError",
"(",
"message",
")",
"except",
"AssertionError",
",",
"e",
":",
"stack",
"=",
"traceback",
".",
"extract_stack",
"(",
")",
"stack",
".",
"pop",
"(",
")",
"logging",
".",
"error",
"(",
"'Assertion failed: %s\\n%s'",
",",
"str",
"(",
"e",
")",
",",
"''",
".",
"join",
"(",
"traceback",
".",
"format_list",
"(",
"stack",
")",
")",
")",
"abort",
"(",
"jsonify_error",
"(",
"e",
",",
"status_code",
"=",
"status_code",
")",
")"
]
| Asserts something is true, aborts the request if not. | [
"Asserts",
"something",
"is",
"true",
"aborts",
"the",
"request",
"if",
"not",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/utils.py#L61-L72 | train |
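A hedged example of guarding a request parameter with jsonify_assert(); the surrounding handler is hypothetical:

build_id = request.form.get('build_id', type=int)
# Sends the client a JSON 400 response when build_id is missing.
utils.jsonify_assert(build_id is not None, 'build_id required')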
bslatkin/dpxdt | dpxdt/server/utils.py | jsonify_error | def jsonify_error(message_or_exception, status_code=400):
    """Returns a JSON payload that indicates the request had an error."""
    if isinstance(message_or_exception, Exception):
        message = '%s: %s' % (
            message_or_exception.__class__.__name__, message_or_exception)
    else:
        message = message_or_exception
    logging.debug('Returning status=%s, error message: %s',
                  status_code, message)
    response = jsonify(error=message)
    response.status_code = status_code
    return response | python
"""Returns a JSON payload that indicates the request had an error."""
if isinstance(message_or_exception, Exception):
message = '%s: %s' % (
message_or_exception.__class__.__name__, message_or_exception)
else:
message = message_or_exception
logging.debug('Returning status=%s, error message: %s',
status_code, message)
response = jsonify(error=message)
response.status_code = status_code
return response | [
"def",
"jsonify_error",
"(",
"message_or_exception",
",",
"status_code",
"=",
"400",
")",
":",
"if",
"isinstance",
"(",
"message_or_exception",
",",
"Exception",
")",
":",
"message",
"=",
"'%s: %s'",
"%",
"(",
"message_or_exception",
".",
"__class__",
".",
"__name__",
",",
"message_or_exception",
")",
"else",
":",
"message",
"=",
"message_or_exception",
"logging",
".",
"debug",
"(",
"'Returning status=%s, error message: %s'",
",",
"status_code",
",",
"message",
")",
"response",
"=",
"jsonify",
"(",
"error",
"=",
"message",
")",
"response",
".",
"status_code",
"=",
"status_code",
"return",
"response"
]
| Returns a JSON payload that indicates the request had an error. | [
"Returns",
"a",
"JSON",
"payload",
"that",
"indicates",
"the",
"request",
"had",
"an",
"error",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/utils.py#L75-L87 | train |
bslatkin/dpxdt | dpxdt/server/utils.py | ignore_exceptions | def ignore_exceptions(f):
    """Decorator catches and ignores any exceptions raised by this function."""
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except:
            logging.exception("Ignoring exception in %r", f)
    return wrapped | python
"""Decorator catches and ignores any exceptions raised by this function."""
@functools.wraps(f)
def wrapped(*args, **kwargs):
try:
return f(*args, **kwargs)
except:
logging.exception("Ignoring exception in %r", f)
return wrapped | [
"def",
"ignore_exceptions",
"(",
"f",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
":",
"logging",
".",
"exception",
"(",
"\"Ignoring exception in %r\"",
",",
"f",
")",
"return",
"wrapped"
]
| Decorator catches and ignores any exceptions raised by this function. | [
"Decorator",
"catches",
"and",
"ignores",
"any",
"exceptions",
"raised",
"by",
"this",
"function",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/utils.py#L90-L98 | train |
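A hedged usage sketch; the listener function and its helper are hypothetical:

@ignore_exceptions
def notify_listeners(task):  # failures are logged and swallowed, not raised
    post_webhook(task)  # hypothetical helper that may fail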
bslatkin/dpxdt | dpxdt/server/utils.py | timesince | def timesince(when):
    """Returns string representing "time since" or "time until".

    Examples:
        3 days ago, 5 hours ago, 3 minutes from now, 5 hours from now, now.
    """
    if not when:
        return ''
    now = datetime.datetime.utcnow()
    if now > when:
        diff = now - when
        suffix = 'ago'
    else:
        diff = when - now
        suffix = 'from now'
    periods = (
        (diff.days / 365, 'year', 'years'),
        (diff.days / 30, 'month', 'months'),
        (diff.days / 7, 'week', 'weeks'),
        (diff.days, 'day', 'days'),
        (diff.seconds / 3600, 'hour', 'hours'),
        (diff.seconds / 60, 'minute', 'minutes'),
        (diff.seconds, 'second', 'seconds'),
    )
    for period, singular, plural in periods:
        if period:
            return '%d %s %s' % (
                period,
                singular if period == 1 else plural,
                suffix)
    return 'now' | python
"""Returns string representing "time since" or "time until".
Examples:
3 days ago, 5 hours ago, 3 minutes from now, 5 hours from now, now.
"""
if not when:
return ''
now = datetime.datetime.utcnow()
if now > when:
diff = now - when
suffix = 'ago'
else:
diff = when - now
suffix = 'from now'
periods = (
(diff.days / 365, 'year', 'years'),
(diff.days / 30, 'month', 'months'),
(diff.days / 7, 'week', 'weeks'),
(diff.days, 'day', 'days'),
(diff.seconds / 3600, 'hour', 'hours'),
(diff.seconds / 60, 'minute', 'minutes'),
(diff.seconds, 'second', 'seconds'),
)
for period, singular, plural in periods:
if period:
return '%d %s %s' % (
period,
singular if period == 1 else plural,
suffix)
return 'now' | [
"def",
"timesince",
"(",
"when",
")",
":",
"if",
"not",
"when",
":",
"return",
"''",
"now",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"if",
"now",
">",
"when",
":",
"diff",
"=",
"now",
"-",
"when",
"suffix",
"=",
"'ago'",
"else",
":",
"diff",
"=",
"when",
"-",
"now",
"suffix",
"=",
"'from now'",
"periods",
"=",
"(",
"(",
"diff",
".",
"days",
"/",
"365",
",",
"'year'",
",",
"'years'",
")",
",",
"(",
"diff",
".",
"days",
"/",
"30",
",",
"'month'",
",",
"'months'",
")",
",",
"(",
"diff",
".",
"days",
"/",
"7",
",",
"'week'",
",",
"'weeks'",
")",
",",
"(",
"diff",
".",
"days",
",",
"'day'",
",",
"'days'",
")",
",",
"(",
"diff",
".",
"seconds",
"/",
"3600",
",",
"'hour'",
",",
"'hours'",
")",
",",
"(",
"diff",
".",
"seconds",
"/",
"60",
",",
"'minute'",
",",
"'minutes'",
")",
",",
"(",
"diff",
".",
"seconds",
",",
"'second'",
",",
"'seconds'",
")",
",",
")",
"for",
"period",
",",
"singular",
",",
"plural",
"in",
"periods",
":",
"if",
"period",
":",
"return",
"'%d %s %s'",
"%",
"(",
"period",
",",
"singular",
"if",
"period",
"==",
"1",
"else",
"plural",
",",
"suffix",
")",
"return",
"'now'"
]
| Returns string representing "time since" or "time until".
Examples:
3 days ago, 5 hours ago, 3 minutes from now, 5 hours from now, now. | [
"Returns",
"string",
"representing",
"time",
"since",
"or",
"time",
"until",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/utils.py#L103-L137 | train |
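Illustrative outputs (hedged, not from the source); note the function relies on Python 2 integer division:

import datetime
now = datetime.datetime.utcnow()
print timesince(now - datetime.timedelta(days=3))  # '3 days ago'
print timesince(now + datetime.timedelta(hours=5, minutes=1))  # '5 hours from now'
print timesince(None)  # ''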
bslatkin/dpxdt | dpxdt/server/utils.py | human_uuid | def human_uuid():
    """Returns a good UUID for using as a human readable string."""
    return base64.b32encode(
        hashlib.sha1(uuid.uuid4().bytes).digest()).lower().strip('=') | python
"""Returns a good UUID for using as a human readable string."""
return base64.b32encode(
hashlib.sha1(uuid.uuid4().bytes).digest()).lower().strip('=') | [
"def",
"human_uuid",
"(",
")",
":",
"return",
"base64",
".",
"b32encode",
"(",
"hashlib",
".",
"sha1",
"(",
"uuid",
".",
"uuid4",
"(",
")",
".",
"bytes",
")",
".",
"digest",
"(",
")",
")",
".",
"lower",
"(",
")",
".",
"strip",
"(",
"'='",
")"
]
| Returns a good UUID for using as a human readable string. | [
"Returns",
"a",
"good",
"UUID",
"for",
"using",
"as",
"a",
"human",
"readable",
"string",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/utils.py#L140-L143 | train |
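A hedged illustration of the output shape; each call is random by construction, and a 20-byte SHA-1 digest base32-encodes to exactly 32 characters with no padding:

print human_uuid()  # e.g. 'gq3tkmbqha2gcnrqmfrdgzjsgq3tkmbq' (32 base32 chars)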
bslatkin/dpxdt | dpxdt/server/utils.py | get_deployment_timestamp | def get_deployment_timestamp():
    """Returns a unique string representing the current deployment.

    Used for busting caches.
    """
    # TODO: Support other deployment situations.
    if os.environ.get('SERVER_SOFTWARE', '').startswith('Google App Engine'):
        version_id = os.environ.get('CURRENT_VERSION_ID')
        major_version, timestamp = version_id.split('.', 1)
        return timestamp
    return 'test' | python
"""Returns a unique string represeting the current deployment.
Used for busting caches.
"""
# TODO: Support other deployment situations.
if os.environ.get('SERVER_SOFTWARE', '').startswith('Google App Engine'):
version_id = os.environ.get('CURRENT_VERSION_ID')
major_version, timestamp = version_id.split('.', 1)
return timestamp
return 'test' | [
"def",
"get_deployment_timestamp",
"(",
")",
":",
"# TODO: Support other deployment situations.",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"'SERVER_SOFTWARE'",
",",
"''",
")",
".",
"startswith",
"(",
"'Google App Engine'",
")",
":",
"version_id",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'CURRENT_VERSION_ID'",
")",
"major_version",
",",
"timestamp",
"=",
"version_id",
".",
"split",
"(",
"'.'",
",",
"1",
")",
"return",
"timestamp",
"return",
"'test'"
]
| Returns a unique string represeting the current deployment.
Used for busting caches. | [
"Returns",
"a",
"unique",
"string",
"represeting",
"the",
"current",
"deployment",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/utils.py#L163-L173 | train |
bslatkin/dpxdt | dpxdt/tools/url_pair_diff.py | real_main | def real_main(new_url=None,
              baseline_url=None,
              upload_build_id=None,
              upload_release_name=None):
    """Runs the url_pair_diff."""
    coordinator = workers.get_coordinator()
    fetch_worker.register(coordinator)
    coordinator.start()
    item = UrlPairDiff(
        new_url,
        baseline_url,
        upload_build_id,
        upload_release_name=upload_release_name,
        heartbeat=workers.PrintWorkflow)
    item.root = True
    coordinator.input_queue.put(item)
    coordinator.wait_one()
    coordinator.stop()
    coordinator.join() | python
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/tools/url_pair_diff.py#L132-L152 | train |
bslatkin/dpxdt | dpxdt/client/fetch_worker.py | fetch_internal | def fetch_internal(item, request):
    """Fetches the given request by using the local Flask context."""
    # Break client dependence on Flask if internal fetches aren't being used.
    from flask import make_response
    from werkzeug.test import EnvironBuilder
    # Break circular dependencies.
    from dpxdt.server import app
    # Attempt to create a Flask environment from a urllib2.Request object.
    environ_base = {
        'REMOTE_ADDR': '127.0.0.1',
    }
    # The data object may be a generator from poster.multipart_encode, so we
    # need to convert that to raw bytes here. Unfortunately EnvironBuilder
    # only works with the whole request buffered in memory.
    data = request.get_data()
    if data and not isinstance(data, str):
        data = ''.join(list(data))
    builder = EnvironBuilder(
        path=request.get_selector(),
        base_url='%s://%s' % (request.get_type(), request.get_host()),
        method=request.get_method(),
        data=data,
        headers=request.header_items(),
        environ_base=environ_base)
    with app.request_context(builder.get_environ()):
        response = make_response(app.dispatch_request())
        LOGGER.info('"%s" %s via internal routing',
                    request.get_selector(), response.status_code)
        item.status_code = response.status_code
        item.content_type = response.mimetype
        if item.result_path:
            # TODO: Is there a better way to access the response stream?
            with open(item.result_path, 'wb') as result_file:
                for piece in response.iter_encoded():
                    result_file.write(piece)
        else:
            item.data = response.get_data()
    return item | python
"""Fetches the given request by using the local Flask context."""
# Break client dependence on Flask if internal fetches aren't being used.
from flask import make_response
from werkzeug.test import EnvironBuilder
# Break circular dependencies.
from dpxdt.server import app
# Attempt to create a Flask environment from a urllib2.Request object.
environ_base = {
'REMOTE_ADDR': '127.0.0.1',
}
# The data object may be a generator from poster.multipart_encode, so we
# need to convert that to raw bytes here. Unfortunately EnvironBuilder
# only works with the whole request buffered in memory.
data = request.get_data()
if data and not isinstance(data, str):
data = ''.join(list(data))
builder = EnvironBuilder(
path=request.get_selector(),
base_url='%s://%s' % (request.get_type(), request.get_host()),
method=request.get_method(),
data=data,
headers=request.header_items(),
environ_base=environ_base)
with app.request_context(builder.get_environ()):
response = make_response(app.dispatch_request())
LOGGER.info('"%s" %s via internal routing',
request.get_selector(), response.status_code)
item.status_code = response.status_code
item.content_type = response.mimetype
if item.result_path:
# TODO: Is there a better way to access the response stream?
with open(item.result_path, 'wb') as result_file:
for piece in response.iter_encoded():
result_file.write(piece)
else:
item.data = response.get_data()
return item | [
"def",
"fetch_internal",
"(",
"item",
",",
"request",
")",
":",
"# Break client dependence on Flask if internal fetches aren't being used.",
"from",
"flask",
"import",
"make_response",
"from",
"werkzeug",
".",
"test",
"import",
"EnvironBuilder",
"# Break circular dependencies.",
"from",
"dpxdt",
".",
"server",
"import",
"app",
"# Attempt to create a Flask environment from a urllib2.Request object.",
"environ_base",
"=",
"{",
"'REMOTE_ADDR'",
":",
"'127.0.0.1'",
",",
"}",
"# The data object may be a generator from poster.multipart_encode, so we",
"# need to convert that to raw bytes here. Unfortunately EnvironBuilder",
"# only works with the whole request buffered in memory.",
"data",
"=",
"request",
".",
"get_data",
"(",
")",
"if",
"data",
"and",
"not",
"isinstance",
"(",
"data",
",",
"str",
")",
":",
"data",
"=",
"''",
".",
"join",
"(",
"list",
"(",
"data",
")",
")",
"builder",
"=",
"EnvironBuilder",
"(",
"path",
"=",
"request",
".",
"get_selector",
"(",
")",
",",
"base_url",
"=",
"'%s://%s'",
"%",
"(",
"request",
".",
"get_type",
"(",
")",
",",
"request",
".",
"get_host",
"(",
")",
")",
",",
"method",
"=",
"request",
".",
"get_method",
"(",
")",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"request",
".",
"header_items",
"(",
")",
",",
"environ_base",
"=",
"environ_base",
")",
"with",
"app",
".",
"request_context",
"(",
"builder",
".",
"get_environ",
"(",
")",
")",
":",
"response",
"=",
"make_response",
"(",
"app",
".",
"dispatch_request",
"(",
")",
")",
"LOGGER",
".",
"info",
"(",
"'\"%s\" %s via internal routing'",
",",
"request",
".",
"get_selector",
"(",
")",
",",
"response",
".",
"status_code",
")",
"item",
".",
"status_code",
"=",
"response",
".",
"status_code",
"item",
".",
"content_type",
"=",
"response",
".",
"mimetype",
"if",
"item",
".",
"result_path",
":",
"# TODO: Is there a better way to access the response stream?",
"with",
"open",
"(",
"item",
".",
"result_path",
",",
"'wb'",
")",
"as",
"result_file",
":",
"for",
"piece",
"in",
"response",
".",
"iter_encoded",
"(",
")",
":",
"result_file",
".",
"write",
"(",
"piece",
")",
"else",
":",
"item",
".",
"data",
"=",
"response",
".",
"get_data",
"(",
")",
"return",
"item"
]
| Fetches the given request by using the local Flask context. | [
"Fetches",
"the",
"given",
"request",
"by",
"using",
"the",
"local",
"Flask",
"context",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/client/fetch_worker.py#L118-L160 | train |
bslatkin/dpxdt | dpxdt/client/fetch_worker.py | fetch_normal | def fetch_normal(item, request):
    """Fetches the given request over HTTP."""
    try:
        conn = urllib2.urlopen(request, timeout=item.timeout_seconds)
    except urllib2.HTTPError, e:
        conn = e
    except (urllib2.URLError, ssl.SSLError), e:
        # TODO: Make this status more clear
        item.status_code = 400
        return item
    try:
        item.status_code = conn.getcode()
        item.content_type = conn.info().gettype()
        if item.result_path:
            with open(item.result_path, 'wb') as result_file:
                shutil.copyfileobj(conn, result_file)
        else:
            item.data = conn.read()
    except socket.timeout, e:
        # TODO: Make this status more clear
        item.status_code = 400
        return item
    finally:
        conn.close()
    return item | python
"""Fetches the given request over HTTP."""
try:
conn = urllib2.urlopen(request, timeout=item.timeout_seconds)
except urllib2.HTTPError, e:
conn = e
except (urllib2.URLError, ssl.SSLError), e:
# TODO: Make this status more clear
item.status_code = 400
return item
try:
item.status_code = conn.getcode()
item.content_type = conn.info().gettype()
if item.result_path:
with open(item.result_path, 'wb') as result_file:
shutil.copyfileobj(conn, result_file)
else:
item.data = conn.read()
except socket.timeout, e:
# TODO: Make this status more clear
item.status_code = 400
return item
finally:
conn.close()
return item | [
"def",
"fetch_normal",
"(",
"item",
",",
"request",
")",
":",
"try",
":",
"conn",
"=",
"urllib2",
".",
"urlopen",
"(",
"request",
",",
"timeout",
"=",
"item",
".",
"timeout_seconds",
")",
"except",
"urllib2",
".",
"HTTPError",
",",
"e",
":",
"conn",
"=",
"e",
"except",
"(",
"urllib2",
".",
"URLError",
",",
"ssl",
".",
"SSLError",
")",
",",
"e",
":",
"# TODO: Make this status more clear",
"item",
".",
"status_code",
"=",
"400",
"return",
"item",
"try",
":",
"item",
".",
"status_code",
"=",
"conn",
".",
"getcode",
"(",
")",
"item",
".",
"content_type",
"=",
"conn",
".",
"info",
"(",
")",
".",
"gettype",
"(",
")",
"if",
"item",
".",
"result_path",
":",
"with",
"open",
"(",
"item",
".",
"result_path",
",",
"'wb'",
")",
"as",
"result_file",
":",
"shutil",
".",
"copyfileobj",
"(",
"conn",
",",
"result_file",
")",
"else",
":",
"item",
".",
"data",
"=",
"conn",
".",
"read",
"(",
")",
"except",
"socket",
".",
"timeout",
",",
"e",
":",
"# TODO: Make this status more clear",
"item",
".",
"status_code",
"=",
"400",
"return",
"item",
"finally",
":",
"conn",
".",
"close",
"(",
")",
"return",
"item"
]
| Fetches the given request over HTTP. | [
"Fetches",
"the",
"given",
"request",
"over",
"HTTP",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/client/fetch_worker.py#L163-L189 | train |
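A hedged sketch of driving fetch_normal() directly; FetchItem's constructor is not shown in this excerpt, so the stand-in object below is an assumption:

import urllib2

class FakeItem(object):  # stand-in for FetchItem, illustration only
    timeout_seconds = 10
    result_path = None  # None keeps the response body in memory as item.data
    status_code = None
    content_type = None
    data = None

item = fetch_normal(FakeItem(), urllib2.Request('http://example.com/'))
print item.status_code, item.content_type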
bslatkin/dpxdt | dpxdt/client/fetch_worker.py | FetchItem.json | def json(self):
    """Returns de-JSONed data or None if it's a different content type."""
    if self._data_json:
        return self._data_json
    if not self.data or self.content_type != 'application/json':
        return None
    self._data_json = json.loads(self.data)
    return self._data_json | python
"""Returns de-JSONed data or None if it's a different content type."""
if self._data_json:
return self._data_json
if not self.data or self.content_type != 'application/json':
return None
self._data_json = json.loads(self.data)
return self._data_json | [
"def",
"json",
"(",
"self",
")",
":",
"if",
"self",
".",
"_data_json",
":",
"return",
"self",
".",
"_data_json",
"if",
"not",
"self",
".",
"data",
"or",
"self",
".",
"content_type",
"!=",
"'application/json'",
":",
"return",
"None",
"self",
".",
"_data_json",
"=",
"json",
".",
"loads",
"(",
"self",
".",
"data",
")",
"return",
"self",
".",
"_data_json"
]
| Returns de-JSONed data or None if it's a different content type. | [
"Returns",
"de",
"-",
"JSONed",
"data",
"or",
"None",
"if",
"it",
"s",
"a",
"different",
"content",
"type",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/client/fetch_worker.py#L106-L115 | train |
bslatkin/dpxdt | dpxdt/tools/local_pdiff.py | CaptureAndDiffWorkflowItem.maybe_imgur | def maybe_imgur(self, path):
    '''Uploads a file to imgur if requested via command line flags.

    Returns either "path" or "path url" depending on the course of action.
    '''
    if not FLAGS.imgur_client_id:
        return path
    im = pyimgur.Imgur(FLAGS.imgur_client_id)
    uploaded_image = im.upload_image(path)
    return '%s %s' % (path, uploaded_image.link) | python
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/tools/local_pdiff.py#L254-L264 | train |
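maybe_imgur gates the upload on a command-line flag and widens the return value from 'path' to 'path url' when the upload happens. A self-contained sketch of the same gate; the uploader here is a stand-in for pyimgur and the client id is hypothetical:

class _Flags(object):
    imgur_client_id = None

FLAGS = _Flags()

def maybe_upload(path, upload=lambda p: 'http://example.invalid/' + p):
    if not FLAGS.imgur_client_id:
        return path
    return '%s %s' % (path, upload(path))

print maybe_upload('diff.png')         # -> 'diff.png'
FLAGS.imgur_client_id = 'client-id'    # hypothetical credential
print maybe_upload('diff.png')         # -> 'diff.png http://example.invalid/diff.png'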
bslatkin/dpxdt | dpxdt/tools/diff_my_images.py | real_main | def real_main(release_url=None,
tests_json_path=None,
upload_build_id=None,
upload_release_name=None):
"""Runs diff_my_images."""
coordinator = workers.get_coordinator()
fetch_worker.register(coordinator)
coordinator.start()
data = open(tests_json_path).read()
tests = load_tests(data)
item = DiffMyImages(
release_url,
tests,
upload_build_id,
upload_release_name,
heartbeat=workers.PrintWorkflow)
item.root = True
coordinator.input_queue.put(item)
coordinator.wait_one()
coordinator.stop()
coordinator.join() | python | def real_main(release_url=None,
tests_json_path=None,
upload_build_id=None,
upload_release_name=None):
"""Runs diff_my_images."""
coordinator = workers.get_coordinator()
fetch_worker.register(coordinator)
coordinator.start()
data = open(tests_json_path).read()
tests = load_tests(data)
item = DiffMyImages(
release_url,
tests,
upload_build_id,
upload_release_name,
heartbeat=workers.PrintWorkflow)
item.root = True
coordinator.input_queue.put(item)
coordinator.wait_one()
coordinator.stop()
coordinator.join() | [
"def",
"real_main",
"(",
"release_url",
"=",
"None",
",",
"tests_json_path",
"=",
"None",
",",
"upload_build_id",
"=",
"None",
",",
"upload_release_name",
"=",
"None",
")",
":",
"coordinator",
"=",
"workers",
".",
"get_coordinator",
"(",
")",
"fetch_worker",
".",
"register",
"(",
"coordinator",
")",
"coordinator",
".",
"start",
"(",
")",
"data",
"=",
"open",
"(",
"FLAGS",
".",
"tests_json_path",
")",
".",
"read",
"(",
")",
"tests",
"=",
"load_tests",
"(",
"data",
")",
"item",
"=",
"DiffMyImages",
"(",
"release_url",
",",
"tests",
",",
"upload_build_id",
",",
"upload_release_name",
",",
"heartbeat",
"=",
"workers",
".",
"PrintWorkflow",
")",
"item",
".",
"root",
"=",
"True",
"coordinator",
".",
"input_queue",
".",
"put",
"(",
"item",
")",
"coordinator",
".",
"wait_one",
"(",
")",
"coordinator",
".",
"stop",
"(",
")",
"coordinator",
".",
"join",
"(",
")"
]
| Runs diff_my_images. | [
"Runs",
"diff_my_images",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/tools/diff_my_images.py#L175-L198 | train |
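Both CLI tools drive the client the same way: start the coordinator, enqueue a single root WorkflowItem, block until it completes, then shut everything down. The skeleton below pulls that lifecycle out of real_main; MyWorkflowItem is a hypothetical subclass, and the import path is assumed from the repo layout:

from dpxdt.client import fetch_worker, workers

coordinator = workers.get_coordinator()
fetch_worker.register(coordinator)
coordinator.start()

item = MyWorkflowItem(heartbeat=workers.PrintWorkflow)  # hypothetical subclass
item.root = True                    # marks the top-level workflow
coordinator.input_queue.put(item)

coordinator.wait_one()              # block until the root item completes
coordinator.stop()                  # ask all worker threads to exit
coordinator.join()                  # and wait for them to do so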
bslatkin/dpxdt | dpxdt/tools/site_diff.py | clean_url | def clean_url(url, force_scheme=None):
"""Cleans the given URL."""
# URL should be ASCII according to RFC 3986
url = str(url)
# Collapse ../../ and related
url_parts = urlparse.urlparse(url)
path_parts = []
for part in url_parts.path.split('/'):
if part == '.':
continue
elif part == '..':
if path_parts:
path_parts.pop()
else:
path_parts.append(part)
url_parts = list(url_parts)
if force_scheme:
url_parts[0] = force_scheme
url_parts[2] = '/'.join(path_parts)
if FLAGS.keep_query_string == False:
url_parts[4] = '' # No query string
url_parts[5] = '' # No fragment
# Ensure the path is never empty
if not url_parts[2]:
url_parts[2] = '/'
return urlparse.urlunparse(url_parts) | python | def clean_url(url, force_scheme=None):
"""Cleans the given URL."""
# URL should be ASCII according to RFC 3986
url = str(url)
# Collapse ../../ and related
url_parts = urlparse.urlparse(url)
path_parts = []
for part in url_parts.path.split('/'):
if part == '.':
continue
elif part == '..':
if path_parts:
path_parts.pop()
else:
path_parts.append(part)
url_parts = list(url_parts)
if force_scheme:
url_parts[0] = force_scheme
url_parts[2] = '/'.join(path_parts)
if FLAGS.keep_query_string == False:
url_parts[4] = '' # No query string
url_parts[5] = '' # No fragment
# Ensure the path is never empty
if not url_parts[2]:
url_parts[2] = '/'
return urlparse.urlunparse(url_parts) | [
"def",
"clean_url",
"(",
"url",
",",
"force_scheme",
"=",
"None",
")",
":",
"# URL should be ASCII according to RFC 3986",
"url",
"=",
"str",
"(",
"url",
")",
"# Collapse ../../ and related",
"url_parts",
"=",
"urlparse",
".",
"urlparse",
"(",
"url",
")",
"path_parts",
"=",
"[",
"]",
"for",
"part",
"in",
"url_parts",
".",
"path",
".",
"split",
"(",
"'/'",
")",
":",
"if",
"part",
"==",
"'.'",
":",
"continue",
"elif",
"part",
"==",
"'..'",
":",
"if",
"path_parts",
":",
"path_parts",
".",
"pop",
"(",
")",
"else",
":",
"path_parts",
".",
"append",
"(",
"part",
")",
"url_parts",
"=",
"list",
"(",
"url_parts",
")",
"if",
"force_scheme",
":",
"url_parts",
"[",
"0",
"]",
"=",
"force_scheme",
"url_parts",
"[",
"2",
"]",
"=",
"'/'",
".",
"join",
"(",
"path_parts",
")",
"if",
"FLAGS",
".",
"keep_query_string",
"==",
"False",
":",
"url_parts",
"[",
"4",
"]",
"=",
"''",
"# No query string",
"url_parts",
"[",
"5",
"]",
"=",
"''",
"# No path",
"# Always have a trailing slash",
"if",
"not",
"url_parts",
"[",
"2",
"]",
":",
"url_parts",
"[",
"2",
"]",
"=",
"'/'",
"return",
"urlparse",
".",
"urlunparse",
"(",
"url_parts",
")"
]
| Cleans the given URL. | [
"Cleans",
"the",
"given",
"URL",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/tools/site_diff.py#L105-L135 | train |
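A worked example of the dot-segment collapse above; the URL is illustrative. The indexes into the urlparse 6-tuple matter here: 2 is the path, 4 is the query string, and 5 is the fragment, which is why those last two are blanked when --keep_query_string is unset:

import urlparse

parts = urlparse.urlparse('http://example.com/a/b/../c/./?x=1#frag')
path_parts = []
for part in parts.path.split('/'):
    if part == '.':
        continue
    elif part == '..':
        if path_parts:
            path_parts.pop()
    else:
        path_parts.append(part)

print '/'.join(path_parts)   # -> '/a/c/'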
bslatkin/dpxdt | dpxdt/tools/site_diff.py | extract_urls | def extract_urls(url, data, unescape=HTMLParser.HTMLParser().unescape):
"""Extracts the URLs from an HTML document."""
parts = urlparse.urlparse(url)
prefix = '%s://%s' % (parts.scheme, parts.netloc)
accessed_dir = os.path.dirname(parts.path)
if not accessed_dir.endswith('/'):
accessed_dir += '/'
for pattern, replacement in REPLACEMENT_REGEXES:
fixed = replacement % {
'base': prefix,
'accessed_dir': accessed_dir,
}
data = re.sub(pattern, fixed, data)
result = set()
for match in re.finditer(MAYBE_HTML_URL_REGEX, data):
found_url = unescape(match.groupdict()['absurl'])
found_url = clean_url(
found_url,
force_scheme=parts[0]) # Use the main page's scheme
result.add(found_url)
return result | python | def extract_urls(url, data, unescape=HTMLParser.HTMLParser().unescape):
"""Extracts the URLs from an HTML document."""
parts = urlparse.urlparse(url)
prefix = '%s://%s' % (parts.scheme, parts.netloc)
accessed_dir = os.path.dirname(parts.path)
if not accessed_dir.endswith('/'):
accessed_dir += '/'
for pattern, replacement in REPLACEMENT_REGEXES:
fixed = replacement % {
'base': prefix,
'accessed_dir': accessed_dir,
}
data = re.sub(pattern, fixed, data)
result = set()
for match in re.finditer(MAYBE_HTML_URL_REGEX, data):
found_url = unescape(match.groupdict()['absurl'])
found_url = clean_url(
found_url,
force_scheme=parts[0]) # Use the main page's scheme
result.add(found_url)
return result | [
"def",
"extract_urls",
"(",
"url",
",",
"data",
",",
"unescape",
"=",
"HTMLParser",
".",
"HTMLParser",
"(",
")",
".",
"unescape",
")",
":",
"parts",
"=",
"urlparse",
".",
"urlparse",
"(",
"url",
")",
"prefix",
"=",
"'%s://%s'",
"%",
"(",
"parts",
".",
"scheme",
",",
"parts",
".",
"netloc",
")",
"accessed_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"parts",
".",
"path",
")",
"if",
"not",
"accessed_dir",
".",
"endswith",
"(",
"'/'",
")",
":",
"accessed_dir",
"+=",
"'/'",
"for",
"pattern",
",",
"replacement",
"in",
"REPLACEMENT_REGEXES",
":",
"fixed",
"=",
"replacement",
"%",
"{",
"'base'",
":",
"prefix",
",",
"'accessed_dir'",
":",
"accessed_dir",
",",
"}",
"data",
"=",
"re",
".",
"sub",
"(",
"pattern",
",",
"fixed",
",",
"data",
")",
"result",
"=",
"set",
"(",
")",
"for",
"match",
"in",
"re",
".",
"finditer",
"(",
"MAYBE_HTML_URL_REGEX",
",",
"data",
")",
":",
"found_url",
"=",
"unescape",
"(",
"match",
".",
"groupdict",
"(",
")",
"[",
"'absurl'",
"]",
")",
"found_url",
"=",
"clean_url",
"(",
"found_url",
",",
"force_scheme",
"=",
"parts",
"[",
"0",
"]",
")",
"# Use the main page's scheme",
"result",
".",
"add",
"(",
"found_url",
")",
"return",
"result"
]
| Extracts the URLs from an HTML document. | [
"Extracts",
"the",
"URLs",
"from",
"an",
"HTML",
"document",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/tools/site_diff.py#L138-L162 | train |
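extract_urls works in two passes: first it rewrites relative references into absolute form using REPLACEMENT_REGEXES, then it scans the result for anything that looks like an absolute URL with MAYBE_HTML_URL_REGEX. Neither constant appears in this record, so the regexes below are hypothetical stand-ins that only illustrate the shape of the approach:

import re

data = '<a href="/about">About</a> <a href="http://example.com/x">X</a>'
base = 'http://example.com'

# Pass 1: make root-relative hrefs absolute.
data = re.sub(r'href="/', 'href="%s/' % base, data)

# Pass 2: pull out everything that now looks like an absolute URL.
print re.findall(r'href="(https?://[^"]+)"', data)
# -> ['http://example.com/about', 'http://example.com/x']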
bslatkin/dpxdt | dpxdt/tools/site_diff.py | prune_urls | def prune_urls(url_set, start_url, allowed_list, ignored_list):
"""Prunes URLs that should be ignored."""
result = set()
for url in url_set:
allowed = False
for allow_url in allowed_list:
if url.startswith(allow_url):
allowed = True
break
if not allowed:
continue
ignored = False
for ignore_url in ignored_list:
if url.startswith(ignore_url):
ignored = True
break
if ignored:
continue
prefix, suffix = (url.rsplit('.', 1) + [''])[:2]
if suffix.lower() in IGNORE_SUFFIXES:
continue
result.add(url)
return result | python | def prune_urls(url_set, start_url, allowed_list, ignored_list):
"""Prunes URLs that should be ignored."""
result = set()
for url in url_set:
allowed = False
for allow_url in allowed_list:
if url.startswith(allow_url):
allowed = True
break
if not allowed:
continue
ignored = False
for ignore_url in ignored_list:
if url.startswith(ignore_url):
ignored = True
break
if ignored:
continue
prefix, suffix = (url.rsplit('.', 1) + [''])[:2]
if suffix.lower() in IGNORE_SUFFIXES:
continue
result.add(url)
return result | [
"def",
"prune_urls",
"(",
"url_set",
",",
"start_url",
",",
"allowed_list",
",",
"ignored_list",
")",
":",
"result",
"=",
"set",
"(",
")",
"for",
"url",
"in",
"url_set",
":",
"allowed",
"=",
"False",
"for",
"allow_url",
"in",
"allowed_list",
":",
"if",
"url",
".",
"startswith",
"(",
"allow_url",
")",
":",
"allowed",
"=",
"True",
"break",
"if",
"not",
"allowed",
":",
"continue",
"ignored",
"=",
"False",
"for",
"ignore_url",
"in",
"ignored_list",
":",
"if",
"url",
".",
"startswith",
"(",
"ignore_url",
")",
":",
"ignored",
"=",
"True",
"break",
"if",
"ignored",
":",
"continue",
"prefix",
",",
"suffix",
"=",
"(",
"url",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"+",
"[",
"''",
"]",
")",
"[",
":",
"2",
"]",
"if",
"suffix",
".",
"lower",
"(",
")",
"in",
"IGNORE_SUFFIXES",
":",
"continue",
"result",
".",
"add",
"(",
"url",
")",
"return",
"result"
]
| Prunes URLs that should be ignored. | [
"Prunes",
"URLs",
"that",
"should",
"be",
"ignored",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/tools/site_diff.py#L169-L198 | train |
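The suffix extraction in prune_urls uses a compact idiom: (url.rsplit('.', 1) + [''])[:2]. Padding with [''] means a URL with no dot yields an empty suffix instead of raising on unpacking. Shown on its own:

for url in ['http://example.com/logo.PNG', 'http://localhost/page']:
    prefix, suffix = (url.rsplit('.', 1) + [''])[:2]
    print repr(suffix.lower())
# -> 'png'   (the URL is dropped when 'png' is in IGNORE_SUFFIXES)
# -> ''      (no dot anywhere, so the padding supplies the empty suffix)

Note that the split is on the last dot anywhere in the URL, so 'http://example.com/page' produces the suffix 'com/page'; assuming IGNORE_SUFFIXES holds plain file extensions, that never matches, and such URLs are kept, which is the desired outcome.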
bslatkin/dpxdt | dpxdt/tools/site_diff.py | real_main | def real_main(start_url=None,
ignore_prefixes=None,
upload_build_id=None,
upload_release_name=None):
"""Runs the site_diff."""
coordinator = workers.get_coordinator()
fetch_worker.register(coordinator)
coordinator.start()
item = SiteDiff(
start_url=start_url,
ignore_prefixes=ignore_prefixes,
upload_build_id=upload_build_id,
upload_release_name=upload_release_name,
heartbeat=workers.PrintWorkflow)
item.root = True
coordinator.input_queue.put(item)
coordinator.wait_one()
coordinator.stop()
coordinator.join() | python | def real_main(start_url=None,
ignore_prefixes=None,
upload_build_id=None,
upload_release_name=None):
"""Runs the site_diff."""
coordinator = workers.get_coordinator()
fetch_worker.register(coordinator)
coordinator.start()
item = SiteDiff(
start_url=start_url,
ignore_prefixes=ignore_prefixes,
upload_build_id=upload_build_id,
upload_release_name=upload_release_name,
heartbeat=workers.PrintWorkflow)
item.root = True
coordinator.input_queue.put(item)
coordinator.wait_one()
coordinator.stop()
coordinator.join() | [
"def",
"real_main",
"(",
"start_url",
"=",
"None",
",",
"ignore_prefixes",
"=",
"None",
",",
"upload_build_id",
"=",
"None",
",",
"upload_release_name",
"=",
"None",
")",
":",
"coordinator",
"=",
"workers",
".",
"get_coordinator",
"(",
")",
"fetch_worker",
".",
"register",
"(",
"coordinator",
")",
"coordinator",
".",
"start",
"(",
")",
"item",
"=",
"SiteDiff",
"(",
"start_url",
"=",
"start_url",
",",
"ignore_prefixes",
"=",
"ignore_prefixes",
",",
"upload_build_id",
"=",
"upload_build_id",
",",
"upload_release_name",
"=",
"upload_release_name",
",",
"heartbeat",
"=",
"workers",
".",
"PrintWorkflow",
")",
"item",
".",
"root",
"=",
"True",
"coordinator",
".",
"input_queue",
".",
"put",
"(",
"item",
")",
"coordinator",
".",
"wait_one",
"(",
")",
"coordinator",
".",
"stop",
"(",
")",
"coordinator",
".",
"join",
"(",
")"
]
| Runs the site_diff. | [
"Runs",
"the",
"site_diff",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/tools/site_diff.py#L328-L348 | train |
bslatkin/dpxdt | dpxdt/server/emails.py | render_or_send | def render_or_send(func, message):
"""Renders an email message for debugging or actually sends it."""
if request.endpoint != func.func_name:
mail.send(message)
if (current_user.is_authenticated() and current_user.superuser):
return render_template('debug_email.html', message=message) | python | def render_or_send(func, message):
"""Renders an email message for debugging or actually sends it."""
if request.endpoint != func.func_name:
mail.send(message)
if (current_user.is_authenticated() and current_user.superuser):
return render_template('debug_email.html', message=message) | [
"def",
"render_or_send",
"(",
"func",
",",
"message",
")",
":",
"if",
"request",
".",
"endpoint",
"!=",
"func",
".",
"func_name",
":",
"mail",
".",
"send",
"(",
"message",
")",
"if",
"(",
"current_user",
".",
"is_authenticated",
"(",
")",
"and",
"current_user",
".",
"superuser",
")",
":",
"return",
"render_template",
"(",
"'debug_email.html'",
",",
"message",
"=",
"message",
")"
]
| Renders an email message for debugging or actually sends it. | [
"Renders",
"an",
"email",
"message",
"for",
"debugging",
"or",
"actually",
"sends",
"it",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/emails.py#L33-L39 | train |
bslatkin/dpxdt | dpxdt/server/emails.py | send_ready_for_review | def send_ready_for_review(build_id, release_name, release_number):
"""Sends an email indicating that the release is ready for review."""
build = models.Build.query.get(build_id)
if not build.send_email:
logging.debug(
'Not sending ready for review email because build does not have '
'email enabled. build_id=%r', build.id)
return
ops = operations.BuildOps(build_id)
release, run_list, stats_dict, _ = ops.get_release(
release_name, release_number)
if not run_list:
logging.debug(
'Not sending ready for review email because there are '
'no runs. build_id=%r, release_name=%r, release_number=%d',
build.id, release.name, release.number)
return
title = '%s: %s - Ready for review' % (build.name, release.name)
email_body = render_template(
'email_ready_for_review.html',
build=build,
release=release,
run_list=run_list,
stats_dict=stats_dict)
recipients = []
if build.email_alias:
recipients.append(build.email_alias)
else:
for user in build.owners:
recipients.append(user.email_address)
if not recipients:
logging.debug(
'Not sending ready for review email because there are no '
'recipients. build_id=%r, release_name=%r, release_number=%d',
build.id, release.name, release.number)
return
message = Message(title, recipients=recipients)
message.html = email_body
logging.info('Sending ready for review email for build_id=%r, '
'release_name=%r, release_number=%d to %r',
build.id, release.name, release.number, recipients)
return render_or_send(send_ready_for_review, message) | python | def send_ready_for_review(build_id, release_name, release_number):
"""Sends an email indicating that the release is ready for review."""
build = models.Build.query.get(build_id)
if not build.send_email:
logging.debug(
'Not sending ready for review email because build does not have '
'email enabled. build_id=%r', build.id)
return
ops = operations.BuildOps(build_id)
release, run_list, stats_dict, _ = ops.get_release(
release_name, release_number)
if not run_list:
logging.debug(
'Not sending ready for review email because there are '
'no runs. build_id=%r, release_name=%r, release_number=%d',
build.id, release.name, release.number)
return
title = '%s: %s - Ready for review' % (build.name, release.name)
email_body = render_template(
'email_ready_for_review.html',
build=build,
release=release,
run_list=run_list,
stats_dict=stats_dict)
recipients = []
if build.email_alias:
recipients.append(build.email_alias)
else:
for user in build.owners:
recipients.append(user.email_address)
if not recipients:
logging.debug(
'Not sending ready for review email because there are no '
'recipients. build_id=%r, release_name=%r, release_number=%d',
build.id, release.name, release.number)
return
message = Message(title, recipients=recipients)
message.html = email_body
logging.info('Sending ready for review email for build_id=%r, '
'release_name=%r, release_number=%d to %r',
build.id, release.name, release.number, recipients)
return render_or_send(send_ready_for_review, message) | [
"def",
"send_ready_for_review",
"(",
"build_id",
",",
"release_name",
",",
"release_number",
")",
":",
"build",
"=",
"models",
".",
"Build",
".",
"query",
".",
"get",
"(",
"build_id",
")",
"if",
"not",
"build",
".",
"send_email",
":",
"logging",
".",
"debug",
"(",
"'Not sending ready for review email because build does not have '",
"'email enabled. build_id=%r'",
",",
"build",
".",
"id",
")",
"return",
"ops",
"=",
"operations",
".",
"BuildOps",
"(",
"build_id",
")",
"release",
",",
"run_list",
",",
"stats_dict",
",",
"_",
"=",
"ops",
".",
"get_release",
"(",
"release_name",
",",
"release_number",
")",
"if",
"not",
"run_list",
":",
"logging",
".",
"debug",
"(",
"'Not sending ready for review email because there are '",
"' no runs. build_id=%r, release_name=%r, release_number=%d'",
",",
"build",
".",
"id",
",",
"release",
".",
"name",
",",
"release",
".",
"number",
")",
"return",
"title",
"=",
"'%s: %s - Ready for review'",
"%",
"(",
"build",
".",
"name",
",",
"release",
".",
"name",
")",
"email_body",
"=",
"render_template",
"(",
"'email_ready_for_review.html'",
",",
"build",
"=",
"build",
",",
"release",
"=",
"release",
",",
"run_list",
"=",
"run_list",
",",
"stats_dict",
"=",
"stats_dict",
")",
"recipients",
"=",
"[",
"]",
"if",
"build",
".",
"email_alias",
":",
"recipients",
".",
"append",
"(",
"build",
".",
"email_alias",
")",
"else",
":",
"for",
"user",
"in",
"build",
".",
"owners",
":",
"recipients",
".",
"append",
"(",
"user",
".",
"email_address",
")",
"if",
"not",
"recipients",
":",
"logging",
".",
"debug",
"(",
"'Not sending ready for review email because there are no '",
"'recipients. build_id=%r, release_name=%r, release_number=%d'",
",",
"build",
".",
"id",
",",
"release",
".",
"name",
",",
"release",
".",
"number",
")",
"return",
"message",
"=",
"Message",
"(",
"title",
",",
"recipients",
"=",
"recipients",
")",
"message",
".",
"html",
"=",
"email_body",
"logging",
".",
"info",
"(",
"'Sending ready for review email for build_id=%r, '",
"'release_name=%r, release_number=%d to %r'",
",",
"build",
".",
"id",
",",
"release",
".",
"name",
",",
"release",
".",
"number",
",",
"recipients",
")",
"return",
"render_or_send",
"(",
"send_ready_for_review",
",",
"message",
")"
]
| Sends an email indicating that the release is ready for review. | [
"Sends",
"an",
"email",
"indicating",
"that",
"the",
"release",
"is",
"ready",
"for",
"review",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/emails.py#L45-L96 | train |
bslatkin/dpxdt | dpxdt/server/frontend.py | homepage | def homepage():
"""Renders the homepage."""
if current_user.is_authenticated():
if not login_fresh():
logging.debug('User needs a fresh token')
abort(login.needs_refresh())
auth.claim_invitations(current_user)
build_list = operations.UserOps(current_user.get_id()).get_builds()
return render_template(
'home.html',
build_list=build_list,
show_video_and_promo_text=app.config['SHOW_VIDEO_AND_PROMO_TEXT']) | python | def homepage():
"""Renders the homepage."""
if current_user.is_authenticated():
if not login_fresh():
logging.debug('User needs a fresh token')
abort(login.needs_refresh())
auth.claim_invitations(current_user)
build_list = operations.UserOps(current_user.get_id()).get_builds()
return render_template(
'home.html',
build_list=build_list,
show_video_and_promo_text=app.config['SHOW_VIDEO_AND_PROMO_TEXT']) | [
"def",
"homepage",
"(",
")",
":",
"if",
"current_user",
".",
"is_authenticated",
"(",
")",
":",
"if",
"not",
"login_fresh",
"(",
")",
":",
"logging",
".",
"debug",
"(",
"'User needs a fresh token'",
")",
"abort",
"(",
"login",
".",
"needs_refresh",
"(",
")",
")",
"auth",
".",
"claim_invitations",
"(",
"current_user",
")",
"build_list",
"=",
"operations",
".",
"UserOps",
"(",
"current_user",
".",
"get_id",
"(",
")",
")",
".",
"get_builds",
"(",
")",
"return",
"render_template",
"(",
"'home.html'",
",",
"build_list",
"=",
"build_list",
",",
"show_video_and_promo_text",
"=",
"app",
".",
"config",
"[",
"'SHOW_VIDEO_AND_PROMO_TEXT'",
"]",
")"
]
| Renders the homepage. | [
"Renders",
"the",
"homepage",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/frontend.py#L55-L68 | train |
bslatkin/dpxdt | dpxdt/server/frontend.py | new_build | def new_build():
"""Page for crediting or editing a build."""
form = forms.BuildForm()
if form.validate_on_submit():
build = models.Build()
form.populate_obj(build)
build.owners.append(current_user)
db.session.add(build)
db.session.flush()
auth.save_admin_log(build, created_build=True, message=build.name)
db.session.commit()
operations.UserOps(current_user.get_id()).evict()
logging.info('Created build via UI: build_id=%r, name=%r',
build.id, build.name)
return redirect(url_for('view_build', id=build.id))
return render_template(
'new_build.html',
build_form=form) | python | def new_build():
"""Page for crediting or editing a build."""
form = forms.BuildForm()
if form.validate_on_submit():
build = models.Build()
form.populate_obj(build)
build.owners.append(current_user)
db.session.add(build)
db.session.flush()
auth.save_admin_log(build, created_build=True, message=build.name)
db.session.commit()
operations.UserOps(current_user.get_id()).evict()
logging.info('Created build via UI: build_id=%r, name=%r',
build.id, build.name)
return redirect(url_for('view_build', id=build.id))
return render_template(
'new_build.html',
build_form=form) | [
"def",
"new_build",
"(",
")",
":",
"form",
"=",
"forms",
".",
"BuildForm",
"(",
")",
"if",
"form",
".",
"validate_on_submit",
"(",
")",
":",
"build",
"=",
"models",
".",
"Build",
"(",
")",
"form",
".",
"populate_obj",
"(",
"build",
")",
"build",
".",
"owners",
".",
"append",
"(",
"current_user",
")",
"db",
".",
"session",
".",
"add",
"(",
"build",
")",
"db",
".",
"session",
".",
"flush",
"(",
")",
"auth",
".",
"save_admin_log",
"(",
"build",
",",
"created_build",
"=",
"True",
",",
"message",
"=",
"build",
".",
"name",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"operations",
".",
"UserOps",
"(",
"current_user",
".",
"get_id",
"(",
")",
")",
".",
"evict",
"(",
")",
"logging",
".",
"info",
"(",
"'Created build via UI: build_id=%r, name=%r'",
",",
"build",
".",
"id",
",",
"build",
".",
"name",
")",
"return",
"redirect",
"(",
"url_for",
"(",
"'view_build'",
",",
"id",
"=",
"build",
".",
"id",
")",
")",
"return",
"render_template",
"(",
"'new_build.html'",
",",
"build_form",
"=",
"form",
")"
]
| Page for creating or editing a build. | [
"Page",
"for",
"crediting",
"or",
"editing",
"a",
"build",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/frontend.py#L73-L96 | train |
bslatkin/dpxdt | dpxdt/server/frontend.py | view_build | def view_build():
"""Page for viewing all releases in a build."""
build = g.build
page_size = min(request.args.get('page_size', 10, type=int), 50)
offset = request.args.get('offset', 0, type=int)
ops = operations.BuildOps(build.id)
has_next_page, candidate_list, stats_counts = ops.get_candidates(
page_size, offset)
# Collate by release name, order releases by latest creation. Init stats.
release_dict = {}
created_dict = {}
run_stats_dict = {}
for candidate in candidate_list:
release_list = release_dict.setdefault(candidate.name, [])
release_list.append(candidate)
max_created = created_dict.get(candidate.name, candidate.created)
created_dict[candidate.name] = max(candidate.created, max_created)
run_stats_dict[candidate.id] = dict(
runs_total=0,
runs_complete=0,
runs_successful=0,
runs_failed=0,
runs_baseline=0,
runs_pending=0)
# Sort each release by candidate number descending
for release_list in release_dict.itervalues():
release_list.sort(key=lambda x: x.number, reverse=True)
# Sort all releases by created time descending
release_age_list = [
(value, key) for key, value in created_dict.iteritems()]
release_age_list.sort(reverse=True)
release_name_list = [key for _, key in release_age_list]
# Count totals for each run state within that release.
for candidate_id, status, count in stats_counts:
stats_dict = run_stats_dict[candidate_id]
for key in ops.get_stats_keys(status):
stats_dict[key] += count
return render_template(
'view_build.html',
build=build,
release_name_list=release_name_list,
release_dict=release_dict,
run_stats_dict=run_stats_dict,
has_next_page=has_next_page,
current_offset=offset,
next_offset=offset + page_size,
last_offset=max(0, offset - page_size),
page_size=page_size) | python | def view_build():
"""Page for viewing all releases in a build."""
build = g.build
page_size = min(request.args.get('page_size', 10, type=int), 50)
offset = request.args.get('offset', 0, type=int)
ops = operations.BuildOps(build.id)
has_next_page, candidate_list, stats_counts = ops.get_candidates(
page_size, offset)
# Collate by release name, order releases by latest creation. Init stats.
release_dict = {}
created_dict = {}
run_stats_dict = {}
for candidate in candidate_list:
release_list = release_dict.setdefault(candidate.name, [])
release_list.append(candidate)
max_created = created_dict.get(candidate.name, candidate.created)
created_dict[candidate.name] = max(candidate.created, max_created)
run_stats_dict[candidate.id] = dict(
runs_total=0,
runs_complete=0,
runs_successful=0,
runs_failed=0,
runs_baseline=0,
runs_pending=0)
# Sort each release by candidate number descending
for release_list in release_dict.itervalues():
release_list.sort(key=lambda x: x.number, reverse=True)
# Sort all releases by created time descending
release_age_list = [
(value, key) for key, value in created_dict.iteritems()]
release_age_list.sort(reverse=True)
release_name_list = [key for _, key in release_age_list]
# Count totals for each run state within that release.
for candidate_id, status, count in stats_counts:
stats_dict = run_stats_dict[candidate_id]
for key in ops.get_stats_keys(status):
stats_dict[key] += count
return render_template(
'view_build.html',
build=build,
release_name_list=release_name_list,
release_dict=release_dict,
run_stats_dict=run_stats_dict,
has_next_page=has_next_page,
current_offset=offset,
next_offset=offset + page_size,
last_offset=max(0, offset - page_size),
page_size=page_size) | [
"def",
"view_build",
"(",
")",
":",
"build",
"=",
"g",
".",
"build",
"page_size",
"=",
"min",
"(",
"request",
".",
"args",
".",
"get",
"(",
"'page_size'",
",",
"10",
",",
"type",
"=",
"int",
")",
",",
"50",
")",
"offset",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'offset'",
",",
"0",
",",
"type",
"=",
"int",
")",
"ops",
"=",
"operations",
".",
"BuildOps",
"(",
"build",
".",
"id",
")",
"has_next_page",
",",
"candidate_list",
",",
"stats_counts",
"=",
"ops",
".",
"get_candidates",
"(",
"page_size",
",",
"offset",
")",
"# Collate by release name, order releases by latest creation. Init stats.",
"release_dict",
"=",
"{",
"}",
"created_dict",
"=",
"{",
"}",
"run_stats_dict",
"=",
"{",
"}",
"for",
"candidate",
"in",
"candidate_list",
":",
"release_list",
"=",
"release_dict",
".",
"setdefault",
"(",
"candidate",
".",
"name",
",",
"[",
"]",
")",
"release_list",
".",
"append",
"(",
"candidate",
")",
"max_created",
"=",
"created_dict",
".",
"get",
"(",
"candidate",
".",
"name",
",",
"candidate",
".",
"created",
")",
"created_dict",
"[",
"candidate",
".",
"name",
"]",
"=",
"max",
"(",
"candidate",
".",
"created",
",",
"max_created",
")",
"run_stats_dict",
"[",
"candidate",
".",
"id",
"]",
"=",
"dict",
"(",
"runs_total",
"=",
"0",
",",
"runs_complete",
"=",
"0",
",",
"runs_successful",
"=",
"0",
",",
"runs_failed",
"=",
"0",
",",
"runs_baseline",
"=",
"0",
",",
"runs_pending",
"=",
"0",
")",
"# Sort each release by candidate number descending",
"for",
"release_list",
"in",
"release_dict",
".",
"itervalues",
"(",
")",
":",
"release_list",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"number",
",",
"reverse",
"=",
"True",
")",
"# Sort all releases by created time descending",
"release_age_list",
"=",
"[",
"(",
"value",
",",
"key",
")",
"for",
"key",
",",
"value",
"in",
"created_dict",
".",
"iteritems",
"(",
")",
"]",
"release_age_list",
".",
"sort",
"(",
"reverse",
"=",
"True",
")",
"release_name_list",
"=",
"[",
"key",
"for",
"_",
",",
"key",
"in",
"release_age_list",
"]",
"# Count totals for each run state within that release.",
"for",
"candidate_id",
",",
"status",
",",
"count",
"in",
"stats_counts",
":",
"stats_dict",
"=",
"run_stats_dict",
"[",
"candidate_id",
"]",
"for",
"key",
"in",
"ops",
".",
"get_stats_keys",
"(",
"status",
")",
":",
"stats_dict",
"[",
"key",
"]",
"+=",
"count",
"return",
"render_template",
"(",
"'view_build.html'",
",",
"build",
"=",
"build",
",",
"release_name_list",
"=",
"release_name_list",
",",
"release_dict",
"=",
"release_dict",
",",
"run_stats_dict",
"=",
"run_stats_dict",
",",
"has_next_page",
"=",
"has_next_page",
",",
"current_offset",
"=",
"offset",
",",
"next_offset",
"=",
"offset",
"+",
"page_size",
",",
"last_offset",
"=",
"max",
"(",
"0",
",",
"offset",
"-",
"page_size",
")",
",",
"page_size",
"=",
"page_size",
")"
]
| Page for viewing all releases in a build. | [
"Page",
"for",
"viewing",
"all",
"releases",
"in",
"a",
"build",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/frontend.py#L101-L154 | train |
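The collation in view_build is group-by-name with a running maximum of creation times, followed by two sorts. Reduced to plain data, with hypothetical (name, number, created) tuples standing in for ORM rows:

candidates = [('v1', 1, 10), ('v2', 1, 30), ('v1', 2, 20)]

release_dict, created_dict = {}, {}
for name, number, created in candidates:
    release_dict.setdefault(name, []).append((number, created))
    created_dict[name] = max(created, created_dict.get(name, created))

for release_list in release_dict.itervalues():
    release_list.sort(key=lambda x: x[0], reverse=True)  # newest candidate first

release_age_list = [(v, k) for k, v in created_dict.iteritems()]
release_age_list.sort(reverse=True)                      # newest release first
print [k for _, k in release_age_list]                   # -> ['v2', 'v1']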
bslatkin/dpxdt | dpxdt/server/frontend.py | view_release | def view_release():
"""Page for viewing all tests runs in a release."""
build = g.build
if request.method == 'POST':
form = forms.ReleaseForm(request.form)
else:
form = forms.ReleaseForm(request.args)
form.validate()
ops = operations.BuildOps(build.id)
release, run_list, stats_dict, approval_log = ops.get_release(
form.name.data, form.number.data)
if not release:
abort(404)
if request.method == 'POST':
decision_states = (
models.Release.REVIEWING,
models.Release.RECEIVING,
models.Release.PROCESSING)
if form.good.data and release.status in decision_states:
release.status = models.Release.GOOD
auth.save_admin_log(build, release_good=True, release=release)
elif form.bad.data and release.status in decision_states:
release.status = models.Release.BAD
auth.save_admin_log(build, release_bad=True, release=release)
elif form.reviewing.data and release.status in (
models.Release.GOOD, models.Release.BAD):
release.status = models.Release.REVIEWING
auth.save_admin_log(build, release_reviewing=True, release=release)
else:
logging.warning(
'Bad state transition for name=%r, number=%r, form=%r',
release.name, release.number, form.data)
abort(400)
db.session.add(release)
db.session.commit()
ops.evict()
return redirect(url_for(
'view_release',
id=build.id,
name=release.name,
number=release.number))
# Update form values for rendering
form.good.data = True
form.bad.data = True
form.reviewing.data = True
return render_template(
'view_release.html',
build=build,
release=release,
run_list=run_list,
release_form=form,
approval_log=approval_log,
stats_dict=stats_dict) | python | def view_release():
"""Page for viewing all tests runs in a release."""
build = g.build
if request.method == 'POST':
form = forms.ReleaseForm(request.form)
else:
form = forms.ReleaseForm(request.args)
form.validate()
ops = operations.BuildOps(build.id)
release, run_list, stats_dict, approval_log = ops.get_release(
form.name.data, form.number.data)
if not release:
abort(404)
if request.method == 'POST':
decision_states = (
models.Release.REVIEWING,
models.Release.RECEIVING,
models.Release.PROCESSING)
if form.good.data and release.status in decision_states:
release.status = models.Release.GOOD
auth.save_admin_log(build, release_good=True, release=release)
elif form.bad.data and release.status in decision_states:
release.status = models.Release.BAD
auth.save_admin_log(build, release_bad=True, release=release)
elif form.reviewing.data and release.status in (
models.Release.GOOD, models.Release.BAD):
release.status = models.Release.REVIEWING
auth.save_admin_log(build, release_reviewing=True, release=release)
else:
logging.warning(
'Bad state transition for name=%r, number=%r, form=%r',
release.name, release.number, form.data)
abort(400)
db.session.add(release)
db.session.commit()
ops.evict()
return redirect(url_for(
'view_release',
id=build.id,
name=release.name,
number=release.number))
# Update form values for rendering
form.good.data = True
form.bad.data = True
form.reviewing.data = True
return render_template(
'view_release.html',
build=build,
release=release,
run_list=run_list,
release_form=form,
approval_log=approval_log,
stats_dict=stats_dict) | [
"def",
"view_release",
"(",
")",
":",
"build",
"=",
"g",
".",
"build",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"form",
"=",
"forms",
".",
"ReleaseForm",
"(",
"request",
".",
"form",
")",
"else",
":",
"form",
"=",
"forms",
".",
"ReleaseForm",
"(",
"request",
".",
"args",
")",
"form",
".",
"validate",
"(",
")",
"ops",
"=",
"operations",
".",
"BuildOps",
"(",
"build",
".",
"id",
")",
"release",
",",
"run_list",
",",
"stats_dict",
",",
"approval_log",
"=",
"ops",
".",
"get_release",
"(",
"form",
".",
"name",
".",
"data",
",",
"form",
".",
"number",
".",
"data",
")",
"if",
"not",
"release",
":",
"abort",
"(",
"404",
")",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"decision_states",
"=",
"(",
"models",
".",
"Release",
".",
"REVIEWING",
",",
"models",
".",
"Release",
".",
"RECEIVING",
",",
"models",
".",
"Release",
".",
"PROCESSING",
")",
"if",
"form",
".",
"good",
".",
"data",
"and",
"release",
".",
"status",
"in",
"decision_states",
":",
"release",
".",
"status",
"=",
"models",
".",
"Release",
".",
"GOOD",
"auth",
".",
"save_admin_log",
"(",
"build",
",",
"release_good",
"=",
"True",
",",
"release",
"=",
"release",
")",
"elif",
"form",
".",
"bad",
".",
"data",
"and",
"release",
".",
"status",
"in",
"decision_states",
":",
"release",
".",
"status",
"=",
"models",
".",
"Release",
".",
"BAD",
"auth",
".",
"save_admin_log",
"(",
"build",
",",
"release_bad",
"=",
"True",
",",
"release",
"=",
"release",
")",
"elif",
"form",
".",
"reviewing",
".",
"data",
"and",
"release",
".",
"status",
"in",
"(",
"models",
".",
"Release",
".",
"GOOD",
",",
"models",
".",
"Release",
".",
"BAD",
")",
":",
"release",
".",
"status",
"=",
"models",
".",
"Release",
".",
"REVIEWING",
"auth",
".",
"save_admin_log",
"(",
"build",
",",
"release_reviewing",
"=",
"True",
",",
"release",
"=",
"release",
")",
"else",
":",
"logging",
".",
"warning",
"(",
"'Bad state transition for name=%r, number=%r, form=%r'",
",",
"release",
".",
"name",
",",
"release",
".",
"number",
",",
"form",
".",
"data",
")",
"abort",
"(",
"400",
")",
"db",
".",
"session",
".",
"add",
"(",
"release",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"ops",
".",
"evict",
"(",
")",
"return",
"redirect",
"(",
"url_for",
"(",
"'view_release'",
",",
"id",
"=",
"build",
".",
"id",
",",
"name",
"=",
"release",
".",
"name",
",",
"number",
"=",
"release",
".",
"number",
")",
")",
"# Update form values for rendering",
"form",
".",
"good",
".",
"data",
"=",
"True",
"form",
".",
"bad",
".",
"data",
"=",
"True",
"form",
".",
"reviewing",
".",
"data",
"=",
"True",
"return",
"render_template",
"(",
"'view_release.html'",
",",
"build",
"=",
"build",
",",
"release",
"=",
"release",
",",
"run_list",
"=",
"run_list",
",",
"release_form",
"=",
"form",
",",
"approval_log",
"=",
"approval_log",
",",
"stats_dict",
"=",
"stats_dict",
")"
]
| Page for viewing all tests runs in a release. | [
"Page",
"for",
"viewing",
"all",
"tests",
"runs",
"in",
"a",
"release",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/frontend.py#L159-L221 | train |
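The POST branch of view_release is a small state machine: good and bad are only reachable from the three pending states, and reviewing only reopens a release that was already decided; anything else is a 400. Written out as data (the table itself is illustrative; the states come from models.Release):

DECISION_STATES = ('reviewing', 'receiving', 'processing')

ALLOWED = {
    'good':      DECISION_STATES,    # approve a pending release
    'bad':       DECISION_STATES,    # reject a pending release
    'reviewing': ('good', 'bad'),    # reopen a decided release
}

def can_transition(action, status):
    return status in ALLOWED.get(action, ())

assert can_transition('good', 'processing')
assert not can_transition('reviewing', 'receiving')   # would abort(400)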
bslatkin/dpxdt | dpxdt/server/frontend.py | _get_artifact_context | def _get_artifact_context(run, file_type):
"""Gets the artifact details for the given run and file_type."""
sha1sum = None
image_file = False
log_file = False
config_file = False
if request.path == '/image':
image_file = True
if file_type == 'before':
sha1sum = run.ref_image
elif file_type == 'diff':
sha1sum = run.diff_image
elif file_type == 'after':
sha1sum = run.image
else:
abort(400)
elif request.path == '/log':
log_file = True
if file_type == 'before':
sha1sum = run.ref_log
elif file_type == 'diff':
sha1sum = run.diff_log
elif file_type == 'after':
sha1sum = run.log
else:
abort(400)
elif request.path == '/config':
config_file = True
if file_type == 'before':
sha1sum = run.ref_config
elif file_type == 'after':
sha1sum = run.config
else:
abort(400)
return image_file, log_file, config_file, sha1sum | python | def _get_artifact_context(run, file_type):
"""Gets the artifact details for the given run and file_type."""
sha1sum = None
image_file = False
log_file = False
config_file = False
if request.path == '/image':
image_file = True
if file_type == 'before':
sha1sum = run.ref_image
elif file_type == 'diff':
sha1sum = run.diff_image
elif file_type == 'after':
sha1sum = run.image
else:
abort(400)
elif request.path == '/log':
log_file = True
if file_type == 'before':
sha1sum = run.ref_log
elif file_type == 'diff':
sha1sum = run.diff_log
elif file_type == 'after':
sha1sum = run.log
else:
abort(400)
elif request.path == '/config':
config_file = True
if file_type == 'before':
sha1sum = run.ref_config
elif file_type == 'after':
sha1sum = run.config
else:
abort(400)
return image_file, log_file, config_file, sha1sum | [
"def",
"_get_artifact_context",
"(",
"run",
",",
"file_type",
")",
":",
"sha1sum",
"=",
"None",
"image_file",
"=",
"False",
"log_file",
"=",
"False",
"config_file",
"=",
"False",
"if",
"request",
".",
"path",
"==",
"'/image'",
":",
"image_file",
"=",
"True",
"if",
"file_type",
"==",
"'before'",
":",
"sha1sum",
"=",
"run",
".",
"ref_image",
"elif",
"file_type",
"==",
"'diff'",
":",
"sha1sum",
"=",
"run",
".",
"diff_image",
"elif",
"file_type",
"==",
"'after'",
":",
"sha1sum",
"=",
"run",
".",
"image",
"else",
":",
"abort",
"(",
"400",
")",
"elif",
"request",
".",
"path",
"==",
"'/log'",
":",
"log_file",
"=",
"True",
"if",
"file_type",
"==",
"'before'",
":",
"sha1sum",
"=",
"run",
".",
"ref_log",
"elif",
"file_type",
"==",
"'diff'",
":",
"sha1sum",
"=",
"run",
".",
"diff_log",
"elif",
"file_type",
"==",
"'after'",
":",
"sha1sum",
"=",
"run",
".",
"log",
"else",
":",
"abort",
"(",
"400",
")",
"elif",
"request",
".",
"path",
"==",
"'/config'",
":",
"config_file",
"=",
"True",
"if",
"file_type",
"==",
"'before'",
":",
"sha1sum",
"=",
"run",
".",
"ref_config",
"elif",
"file_type",
"==",
"'after'",
":",
"sha1sum",
"=",
"run",
".",
"config",
"else",
":",
"abort",
"(",
"400",
")",
"return",
"image_file",
",",
"log_file",
",",
"config_file",
",",
"sha1sum"
]
| Gets the artifact details for the given run and file_type. | [
"Gets",
"the",
"artifact",
"details",
"for",
"the",
"given",
"run",
"and",
"file_type",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/frontend.py#L224-L260 | train |
bslatkin/dpxdt | dpxdt/client/workers.py | get_coordinator | def get_coordinator():
"""Creates a coordinator and returns it."""
workflow_queue = Queue.Queue()
complete_queue = Queue.Queue()
coordinator = WorkflowThread(workflow_queue, complete_queue)
coordinator.register(WorkflowItem, workflow_queue)
return coordinator | python | def get_coordinator():
"""Creates a coordinator and returns it."""
workflow_queue = Queue.Queue()
complete_queue = Queue.Queue()
coordinator = WorkflowThread(workflow_queue, complete_queue)
coordinator.register(WorkflowItem, workflow_queue)
return coordinator | [
"def",
"get_coordinator",
"(",
")",
":",
"workflow_queue",
"=",
"Queue",
".",
"Queue",
"(",
")",
"complete_queue",
"=",
"Queue",
".",
"Queue",
"(",
")",
"coordinator",
"=",
"WorkflowThread",
"(",
"workflow_queue",
",",
"complete_queue",
")",
"coordinator",
".",
"register",
"(",
"WorkflowItem",
",",
"workflow_queue",
")",
"return",
"coordinator"
]
| Creates a coordinator and returns it. | [
"Creates",
"a",
"coordinator",
"and",
"returns",
"it",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/client/workers.py#L553-L559 | train |
bslatkin/dpxdt | dpxdt/client/workers.py | WorkItem._print_repr | def _print_repr(self, depth):
"""Print this WorkItem to the given stack depth.
The depth parameter ensures that we can print WorkItems in
arbitrarily long chains without hitting the max stack depth.
This can happen with WaitForUrlWorkflowItems, which
create long chains of small waits.
"""
if depth <= 0:
return '%s.%s#%d' % (
self.__class__.__module__,
self.__class__.__name__,
id(self))
return '%s.%s(%s)#%d' % (
self.__class__.__module__,
self.__class__.__name__,
self._print_tree(self._get_dict_for_repr(), depth - 1),
id(self)) | python | def _print_repr(self, depth):
"""Print this WorkItem to the given stack depth.
The depth parameter ensures that we can print WorkItems in
arbitrarily long chains without hitting the max stack depth.
This can happen with WaitForUrlWorkflowItems, which
create long chains of small waits.
"""
if depth <= 0:
return '%s.%s#%d' % (
self.__class__.__module__,
self.__class__.__name__,
id(self))
return '%s.%s(%s)#%d' % (
self.__class__.__module__,
self.__class__.__name__,
self._print_tree(self._get_dict_for_repr(), depth - 1),
id(self)) | [
"def",
"_print_repr",
"(",
"self",
",",
"depth",
")",
":",
"if",
"depth",
"<=",
"0",
":",
"return",
"'%s.%s#%d'",
"%",
"(",
"self",
".",
"__class__",
".",
"__module__",
",",
"self",
".",
"__class__",
".",
"__name__",
",",
"id",
"(",
"self",
")",
")",
"return",
"'%s.%s(%s)#%d'",
"%",
"(",
"self",
".",
"__class__",
".",
"__module__",
",",
"self",
".",
"__class__",
".",
"__name__",
",",
"self",
".",
"_print_tree",
"(",
"self",
".",
"_get_dict_for_repr",
"(",
")",
",",
"depth",
"-",
"1",
")",
",",
"id",
"(",
"self",
")",
")"
]
| Print this WorkItem to the given stack depth.
The depth parameter ensures that we can print WorkItems in
arbitrarily long chains without hitting the max stack depth.
This can happen with WaitForUrlWorkflowItems, which
create long chains of small waits. | [
"Print",
"this",
"WorkItem",
"to",
"the",
"given",
"stack",
"depth",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/client/workers.py#L74-L92 | train |
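_print_repr caps recursion depth so that long chains of linked WorkItems (such as those built by WaitForUrlWorkflowItem) can be printed without exhausting the stack. A standalone sketch of the idea; the Node class is illustrative, not from the repo:

class Node(object):
    def __init__(self, name, child=None):
        self.name, self.child = name, child

    def print_repr(self, depth):
        # Once the depth budget is spent, print the node without descending.
        if depth <= 0 or self.child is None:
            return 'Node(%s)#%d' % (self.name, id(self))
        return 'Node(%s -> %s)#%d' % (
            self.name, self.child.print_repr(depth - 1), id(self))

chain = Node('a', Node('b', Node('c')))
print chain.print_repr(1)   # 'c' is elided once the depth budget runs out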
bslatkin/dpxdt | dpxdt/client/workers.py | ResultList.error | def error(self):
"""Returns the error for this barrier and all work items, if any."""
# Copy the error from any failed item to be the error for the whole
# barrier. The first error seen "wins". Also handles the case where
# the WorkItems passed into the barrier have already completed and
# been marked with errors.
for item in self:
if isinstance(item, WorkItem) and item.error:
return item.error
return None | python | def error(self):
"""Returns the error for this barrier and all work items, if any."""
# Copy the error from any failed item to be the error for the whole
# barrier. The first error seen "wins". Also handles the case where
# the WorkItems passed into the barrier have already completed and
# been marked with errors.
for item in self:
if isinstance(item, WorkItem) and item.error:
return item.error
return None | [
"def",
"error",
"(",
"self",
")",
":",
"# Copy the error from any failed item to be the error for the whole",
"# barrier. The first error seen \"wins\". Also handles the case where",
"# the WorkItems passed into the barrier have already completed and",
"# been marked with errors.",
"for",
"item",
"in",
"self",
":",
"if",
"isinstance",
"(",
"item",
",",
"WorkItem",
")",
"and",
"item",
".",
"error",
":",
"return",
"item",
".",
"error",
"return",
"None"
]
| Returns the error for this barrier and all work items, if any. | [
"Returns",
"the",
"error",
"for",
"this",
"barrier",
"and",
"all",
"work",
"items",
"if",
"any",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/client/workers.py#L227-L236 | train |
bslatkin/dpxdt | dpxdt/client/workers.py | Barrier.outstanding | def outstanding(self):
"""Returns whether or not this barrier has pending work."""
# Allow the same WorkItem to be yielded multiple times but not
# count towards blocking the barrier.
done_count = 0
for item in self:
if not self.wait_any and item.fire_and_forget:
# Only count fire_and_forget items as done if this is
# *not* a WaitAny barrier. We only want to return control
# to the caller when at least one of the blocking items
# has completed.
done_count += 1
elif item.done:
done_count += 1
if self.wait_any and done_count > 0:
return False
if done_count == len(self):
return False
return True | python | def outstanding(self):
"""Returns whether or not this barrier has pending work."""
# Allow the same WorkItem to be yielded multiple times but not
# count towards blocking the barrier.
done_count = 0
for item in self:
if not self.wait_any and item.fire_and_forget:
# Only count fire_and_forget items as done if this is
# *not* a WaitAny barrier. We only want to return control
# to the caller when at least one of the blocking items
# has completed.
done_count += 1
elif item.done:
done_count += 1
if self.wait_any and done_count > 0:
return False
if done_count == len(self):
return False
return True | [
"def",
"outstanding",
"(",
"self",
")",
":",
"# Allow the same WorkItem to be yielded multiple times but not",
"# count towards blocking the barrier.",
"done_count",
"=",
"0",
"for",
"item",
"in",
"self",
":",
"if",
"not",
"self",
".",
"wait_any",
"and",
"item",
".",
"fire_and_forget",
":",
"# Only count fire_and_forget items as done if this is",
"# *not* a WaitAny barrier. We only want to return control",
"# to the caller when at least one of the blocking items",
"# has completed.",
"done_count",
"+=",
"1",
"elif",
"item",
".",
"done",
":",
"done_count",
"+=",
"1",
"if",
"self",
".",
"wait_any",
"and",
"done_count",
">",
"0",
":",
"return",
"False",
"if",
"done_count",
"==",
"len",
"(",
"self",
")",
":",
"return",
"False",
"return",
"True"
]
| Returns whether or not this barrier has pending work. | [
"Returns",
"whether",
"or",
"not",
"this",
"barrier",
"has",
"pending",
"work",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/client/workers.py#L274-L295 | train |
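The interaction between wait_any and fire_and_forget above is easy to misread, so here it is as a tiny predicate with the interesting cases asserted (a sketch mirroring the tally loop, not repo code):

def counts_as_done(wait_any, fire_and_forget, done):
    if not wait_any and fire_and_forget:
        return True
    return done

assert counts_as_done(False, True, False)     # a plain barrier never waits on fire-and-forget
assert not counts_as_done(True, True, False)  # a WaitAny barrier still does until it finishes
assert counts_as_done(True, False, True)      # one finished blocking item unblocks WaitAny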
bslatkin/dpxdt | dpxdt/client/workers.py | Barrier.get_item | def get_item(self):
"""Returns the item to send back into the workflow generator."""
if self.was_list:
result = ResultList()
for item in self:
if isinstance(item, WorkflowItem):
if item.done and not item.error:
result.append(item.result)
else:
# When there's an error or the workflow isn't done yet,
# just return the original WorkflowItem so the caller
# can inspect its entire state.
result.append(item)
else:
result.append(item)
return result
else:
return self[0] | python | def get_item(self):
"""Returns the item to send back into the workflow generator."""
if self.was_list:
result = ResultList()
for item in self:
if isinstance(item, WorkflowItem):
if item.done and not item.error:
result.append(item.result)
else:
# When there's an error or the workflow isn't done yet,
# just return the original WorkflowItem so the caller
# can inspect its entire state.
result.append(item)
else:
result.append(item)
return result
else:
return self[0] | [
"def",
"get_item",
"(",
"self",
")",
":",
"if",
"self",
".",
"was_list",
":",
"result",
"=",
"ResultList",
"(",
")",
"for",
"item",
"in",
"self",
":",
"if",
"isinstance",
"(",
"item",
",",
"WorkflowItem",
")",
":",
"if",
"item",
".",
"done",
"and",
"not",
"item",
".",
"error",
":",
"result",
".",
"append",
"(",
"item",
".",
"result",
")",
"else",
":",
"# When there's an error or the workflow isn't done yet,",
"# just return the original WorkflowItem so the caller",
"# can inspect its entire state.",
"result",
".",
"append",
"(",
"item",
")",
"else",
":",
"result",
".",
"append",
"(",
"item",
")",
"return",
"result",
"else",
":",
"return",
"self",
"[",
"0",
"]"
]
| Returns the item to send back into the workflow generator. | [
"Returns",
"the",
"item",
"to",
"send",
"back",
"into",
"the",
"workflow",
"generator",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/client/workers.py#L297-L314 | train |
bslatkin/dpxdt | dpxdt/client/workers.py | WorkflowThread.start | def start(self):
"""Starts the coordinator thread and all related worker threads."""
assert not self.interrupted
for thread in self.worker_threads:
thread.start()
WorkerThread.start(self) | python | def start(self):
"""Starts the coordinator thread and all related worker threads."""
assert not self.interrupted
for thread in self.worker_threads:
thread.start()
WorkerThread.start(self) | [
"def",
"start",
"(",
"self",
")",
":",
"assert",
"not",
"self",
".",
"interrupted",
"for",
"thread",
"in",
"self",
".",
"worker_threads",
":",
"thread",
".",
"start",
"(",
")",
"WorkerThread",
".",
"start",
"(",
"self",
")"
]
| Starts the coordinator thread and all related worker threads. | [
"Starts",
"the",
"coordinator",
"thread",
"and",
"all",
"related",
"worker",
"threads",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/client/workers.py#L429-L434 | train |
bslatkin/dpxdt | dpxdt/client/workers.py | WorkflowThread.stop | def stop(self):
"""Stops the coordinator thread and all related threads."""
if self.interrupted:
return
for thread in self.worker_threads:
thread.interrupted = True
self.interrupted = True | python | def stop(self):
"""Stops the coordinator thread and all related threads."""
if self.interrupted:
return
for thread in self.worker_threads:
thread.interrupted = True
self.interrupted = True | [
"def",
"stop",
"(",
"self",
")",
":",
"if",
"self",
".",
"interrupted",
":",
"return",
"for",
"thread",
"in",
"self",
".",
"worker_threads",
":",
"thread",
".",
"interrupted",
"=",
"True",
"self",
".",
"interrupted",
"=",
"True"
]
| Stops the coordinator thread and all related threads. | [
"Stops",
"the",
"coordinator",
"thread",
"and",
"all",
"related",
"threads",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/client/workers.py#L436-L442 | train |
bslatkin/dpxdt | dpxdt/client/workers.py | WorkflowThread.join | def join(self):
"""Joins the coordinator thread and all worker threads."""
for thread in self.worker_threads:
thread.join()
WorkerThread.join(self) | python | def join(self):
"""Joins the coordinator thread and all worker threads."""
for thread in self.worker_threads:
thread.join()
WorkerThread.join(self) | [
"def",
"join",
"(",
"self",
")",
":",
"for",
"thread",
"in",
"self",
".",
"worker_threads",
":",
"thread",
".",
"join",
"(",
")",
"WorkerThread",
".",
"join",
"(",
"self",
")"
]
| Joins the coordinator thread and all worker threads. | [
"Joins",
"the",
"coordinator",
"thread",
"and",
"all",
"worker",
"threads",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/client/workers.py#L444-L448 | train |
bslatkin/dpxdt | dpxdt/client/workers.py | WorkflowThread.wait_one | def wait_one(self):
"""Waits until this worker has finished one work item or died."""
while True:
try:
item = self.output_queue.get(True, self.polltime)
except Queue.Empty:
continue
except KeyboardInterrupt:
LOGGER.debug('Exiting')
return
else:
item.check_result()
return | python | def wait_one(self):
"""Waits until this worker has finished one work item or died."""
while True:
try:
item = self.output_queue.get(True, self.polltime)
except Queue.Empty:
continue
except KeyboardInterrupt:
LOGGER.debug('Exiting')
return
else:
item.check_result()
return | [
"def",
"wait_one",
"(",
"self",
")",
":",
"while",
"True",
":",
"try",
":",
"item",
"=",
"self",
".",
"output_queue",
".",
"get",
"(",
"True",
",",
"self",
".",
"polltime",
")",
"except",
"Queue",
".",
"Empty",
":",
"continue",
"except",
"KeyboardInterrupt",
":",
"LOGGER",
".",
"debug",
"(",
"'Exiting'",
")",
"return",
"else",
":",
"item",
".",
"check_result",
"(",
")",
"return"
]
| Waits until this worker has finished one work item or died. | [
"Waits",
"until",
"this",
"worker",
"has",
"finished",
"one",
"work",
"item",
"or",
"died",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/client/workers.py#L450-L462 | train |
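wait_one polls with a short timeout instead of blocking forever on Queue.get; in Python 2 an indefinitely blocking get can leave the main thread unresponsive to Ctrl-C, which is the usual reason for this loop shape. The idiom on its own (queue contents and poll interval are illustrative):

import Queue

def wait_for_result(q, polltime=1.0):
    while True:
        try:
            return q.get(True, polltime)   # wake up periodically to stay interruptible
        except Queue.Empty:
            continue
        except KeyboardInterrupt:
            return None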
bslatkin/dpxdt | dpxdt/server/auth.py | superuser_required | def superuser_required(f):
"""Requires the requestor to be a super user."""
@functools.wraps(f)
@login_required
def wrapped(*args, **kwargs):
if not (current_user.is_authenticated() and current_user.superuser):
abort(403)
return f(*args, **kwargs)
return wrapped | python | def superuser_required(f):
"""Requires the requestor to be a super user."""
@functools.wraps(f)
@login_required
def wrapped(*args, **kwargs):
if not (current_user.is_authenticated() and current_user.superuser):
abort(403)
return f(*args, **kwargs)
return wrapped | [
"def",
"superuser_required",
"(",
"f",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"@",
"login_required",
"def",
"wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"(",
"current_user",
".",
"is_authenticated",
"(",
")",
"and",
"current_user",
".",
"superuser",
")",
":",
"abort",
"(",
"403",
")",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapped"
]
| Requires the requestor to be a super user. | [
"Requires",
"the",
"requestor",
"to",
"be",
"a",
"super",
"user",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/auth.py#L174-L182 | train |
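Because superuser_required already wraps login_required, the login check runs first: anonymous users are redirected to log in, and authenticated non-superusers get the 403. Hypothetical usage; the route and handler are illustrative:

@app.route('/admin/flush_cache')
@superuser_required
def flush_cache():
    return 'flushed'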
bslatkin/dpxdt | dpxdt/server/auth.py | can_user_access_build | def can_user_access_build(param_name):
"""Determines if the current user can access the build ID in the request.
Args:
param_name: Parameter name to use for getting the build ID from the
request. Will fetch from GET or POST requests.
Returns:
The build the user has access to.
"""
build_id = (
request.args.get(param_name, type=int) or
request.form.get(param_name, type=int) or
request.json[param_name])
if not build_id:
logging.debug('Build ID in param_name=%r was missing', param_name)
abort(400)
ops = operations.UserOps(current_user.get_id())
build, user_is_owner = ops.owns_build(build_id)
if not build:
logging.debug('Could not find build_id=%r', build_id)
abort(404)
if current_user.is_authenticated() and not user_is_owner:
# Assume the user should be able to access the build but can't because
# the cache is out of date. This forces the cache to repopulate, any
# outstanding user invitations to be completed, hopefully resulting in
# the user having access to the build.
ops.evict()
claim_invitations(current_user)
build, user_is_owner = ops.owns_build(build_id)
if not user_is_owner:
if current_user.is_authenticated() and current_user.superuser:
pass
elif request.method != 'GET':
logging.debug('No way to log in user via modifying request')
abort(403)
elif build.public:
pass
elif current_user.is_authenticated():
logging.debug('User does not have access to this build')
abort(flask.Response('You cannot access this build', 403))
else:
logging.debug('Redirecting user to login to get build access')
abort(login.unauthorized())
elif not login_fresh():
logging.debug('User login is old; forcing refresh')
abort(login.needs_refresh())
return build | python | def can_user_access_build(param_name):
"""Determines if the current user can access the build ID in the request.
Args:
param_name: Parameter name to use for getting the build ID from the
request. Will fetch from GET or POST requests.
Returns:
The build the user has access to.
"""
build_id = (
request.args.get(param_name, type=int) or
request.form.get(param_name, type=int) or
request.json[param_name])
if not build_id:
logging.debug('Build ID in param_name=%r was missing', param_name)
abort(400)
ops = operations.UserOps(current_user.get_id())
build, user_is_owner = ops.owns_build(build_id)
if not build:
logging.debug('Could not find build_id=%r', build_id)
abort(404)
if current_user.is_authenticated() and not user_is_owner:
# Assume the user should be able to access the build but can't because
# the cache is out of date. This forces the cache to repopulate and any
# outstanding user invitations to be completed, hopefully resulting in
# the user having access to the build.
ops.evict()
claim_invitations(current_user)
build, user_is_owner = ops.owns_build(build_id)
if not user_is_owner:
if current_user.is_authenticated() and current_user.superuser:
pass
elif request.method != 'GET':
logging.debug('No way to log in user via modifying request')
abort(403)
elif build.public:
pass
elif current_user.is_authenticated():
logging.debug('User does not have access to this build')
abort(flask.Response('You cannot access this build', 403))
else:
logging.debug('Redirecting user to login to get build access')
abort(login.unauthorized())
elif not login_fresh():
logging.debug('User login is old; forcing refresh')
abort(login.needs_refresh())
return build | [
"def",
"can_user_access_build",
"(",
"param_name",
")",
":",
"build_id",
"=",
"(",
"request",
".",
"args",
".",
"get",
"(",
"param_name",
",",
"type",
"=",
"int",
")",
"or",
"request",
".",
"form",
".",
"get",
"(",
"param_name",
",",
"type",
"=",
"int",
")",
"or",
"request",
".",
"json",
"[",
"param_name",
"]",
")",
"if",
"not",
"build_id",
":",
"logging",
".",
"debug",
"(",
"'Build ID in param_name=%r was missing'",
",",
"param_name",
")",
"abort",
"(",
"400",
")",
"ops",
"=",
"operations",
".",
"UserOps",
"(",
"current_user",
".",
"get_id",
"(",
")",
")",
"build",
",",
"user_is_owner",
"=",
"ops",
".",
"owns_build",
"(",
"build_id",
")",
"if",
"not",
"build",
":",
"logging",
".",
"debug",
"(",
"'Could not find build_id=%r'",
",",
"build_id",
")",
"abort",
"(",
"404",
")",
"if",
"current_user",
".",
"is_authenticated",
"(",
")",
"and",
"not",
"user_is_owner",
":",
"# Assume the user should be able to access the build but can't because",
"# the cache is out of date. This forces the cache to repopulate, any",
"# outstanding user invitations to be completed, hopefully resulting in",
"# the user having access to the build.",
"ops",
".",
"evict",
"(",
")",
"claim_invitations",
"(",
"current_user",
")",
"build",
",",
"user_is_owner",
"=",
"ops",
".",
"owns_build",
"(",
"build_id",
")",
"if",
"not",
"user_is_owner",
":",
"if",
"current_user",
".",
"is_authenticated",
"(",
")",
"and",
"current_user",
".",
"superuser",
":",
"pass",
"elif",
"request",
".",
"method",
"!=",
"'GET'",
":",
"logging",
".",
"debug",
"(",
"'No way to log in user via modifying request'",
")",
"abort",
"(",
"403",
")",
"elif",
"build",
".",
"public",
":",
"pass",
"elif",
"current_user",
".",
"is_authenticated",
"(",
")",
":",
"logging",
".",
"debug",
"(",
"'User does not have access to this build'",
")",
"abort",
"(",
"flask",
".",
"Response",
"(",
"'You cannot access this build'",
",",
"403",
")",
")",
"else",
":",
"logging",
".",
"debug",
"(",
"'Redirecting user to login to get build access'",
")",
"abort",
"(",
"login",
".",
"unauthorized",
"(",
")",
")",
"elif",
"not",
"login_fresh",
"(",
")",
":",
"logging",
".",
"debug",
"(",
"'User login is old; forcing refresh'",
")",
"abort",
"(",
"login",
".",
"needs_refresh",
"(",
")",
")",
"return",
"build"
]
| Determines if the current user can access the build ID in the request.
Args:
param_name: Parameter name to use for getting the build ID from the
request. Will fetch from GET or POST requests.
Returns:
The build the user has access to. | [
"Determines",
"if",
"the",
"current",
"user",
"can",
"access",
"the",
"build",
"ID",
"in",
"the",
"request",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/auth.py#L185-L236 | train |
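A sketch of calling the helper directly from a view, assuming the build ID arrives in an 'id' parameter; the route and template name are made up.

@app.route('/build')
def show_build():
    # Aborts with 400/403/404 or a login redirect as described above;
    # on success the models.Build row comes back to the caller.
    build = can_user_access_build('id')
    return render_template('view_build.html', build=build)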
bslatkin/dpxdt | dpxdt/server/auth.py | build_access_required | def build_access_required(function_or_param_name):
"""Decorator ensures user has access to the build ID in the request.
May be used in two ways:
@build_access_required
def my_func():
...
@build_access_required('custom_build_id_param')
def my_func():
...
Always stores the models.Build entity on flask.g.build before
calling the given function.
"""
def get_wrapper(param_name, f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
g.build = can_user_access_build(param_name)
if not utils.is_production():
# Insert a sleep to emulate page loading in production.
time.sleep(0.5)
return f(*args, **kwargs)
return wrapped
if isinstance(function_or_param_name, basestring):
return lambda f: get_wrapper(function_or_param_name, f)
else:
return get_wrapper('id', function_or_param_name) | python | def build_access_required(function_or_param_name):
"""Decorator ensures user has access to the build ID in the request.
May be used in two ways:
@build_access_required
def my_func():
...
@build_access_required('custom_build_id_param')
def my_func():
...
Always stores the models.Build entity on flask.g.build before
calling the given function.
"""
def get_wrapper(param_name, f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
g.build = can_user_access_build(param_name)
if not utils.is_production():
# Insert a sleep to emulate page loading in production.
time.sleep(0.5)
return f(*args, **kwargs)
return wrapped
if isinstance(function_or_param_name, basestring):
return lambda f: get_wrapper(function_or_param_name, f)
else:
return get_wrapper('id', function_or_param_name) | [
"def",
"build_access_required",
"(",
"function_or_param_name",
")",
":",
"def",
"get_wrapper",
"(",
"param_name",
",",
"f",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"g",
".",
"build",
"=",
"can_user_access_build",
"(",
"param_name",
")",
"if",
"not",
"utils",
".",
"is_production",
"(",
")",
":",
"# Insert a sleep to emulate page loading in production.",
"time",
".",
"sleep",
"(",
"0.5",
")",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapped",
"if",
"isinstance",
"(",
"function_or_param_name",
",",
"basestring",
")",
":",
"return",
"lambda",
"f",
":",
"get_wrapper",
"(",
"function_or_param_name",
",",
"f",
")",
"else",
":",
"return",
"get_wrapper",
"(",
"'id'",
",",
"function_or_param_name",
")"
]
| Decorator ensures user has access to the build ID in the request.
May be used in two ways:
@build_access_required
def my_func():
...
@build_access_required('custom_build_id_param')
def my_func():
...
Always stores the models.Build entity on flask.g.build before
calling the given function. | [
"Decorator",
"ensures",
"user",
"has",
"access",
"to",
"the",
"build",
"ID",
"in",
"the",
"request",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/auth.py#L239-L268 | train |
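A usage sketch: both decorator forms leave the resolved build on flask.g rather than passing it as an argument. The route is hypothetical.

@app.route('/report')
@build_access_required('build_id')
def build_report():
    # Access was already checked; the decorator stored the row on g.
    return 'viewing build %d' % g.build.id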
bslatkin/dpxdt | dpxdt/server/auth.py | _get_api_key_ops | def _get_api_key_ops():
"""Gets the operations.ApiKeyOps instance for the current request."""
auth_header = request.authorization
if not auth_header:
logging.debug('API request lacks authorization header')
abort(flask.Response(
'API key required', 401,
{'WWW-Authenticate': 'Basic realm="API key required"'}))
return operations.ApiKeyOps(auth_header.username, auth_header.password) | python | def _get_api_key_ops():
"""Gets the operations.ApiKeyOps instance for the current request."""
auth_header = request.authorization
if not auth_header:
logging.debug('API request lacks authorization header')
abort(flask.Response(
'API key required', 401,
{'WWW-Authenticate': 'Basic realm="API key required"'}))
return operations.ApiKeyOps(auth_header.username, auth_header.password) | [
"def",
"_get_api_key_ops",
"(",
")",
":",
"auth_header",
"=",
"request",
".",
"authorization",
"if",
"not",
"auth_header",
":",
"logging",
".",
"debug",
"(",
"'API request lacks authorization header'",
")",
"abort",
"(",
"flask",
".",
"Response",
"(",
"'API key required'",
",",
"401",
",",
"{",
"'WWW-Authenticate'",
":",
"'Basic realm=\"API key required\"'",
"}",
")",
")",
"return",
"operations",
".",
"ApiKeyOps",
"(",
"auth_header",
".",
"username",
",",
"auth_header",
".",
"password",
")"
]
| Gets the operations.ApiKeyOps instance for the current request. | [
"Gets",
"the",
"operations",
".",
"ApiKeyOps",
"instance",
"for",
"the",
"current",
"request",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/auth.py#L271-L280 | train |
bslatkin/dpxdt | dpxdt/server/auth.py | current_api_key | def current_api_key():
"""Determines the API key for the current request.
Returns:
The ApiKey instance.
"""
if app.config.get('IGNORE_AUTH'):
return models.ApiKey(
id='anonymous_superuser',
secret='',
superuser=True)
ops = _get_api_key_ops()
api_key = ops.get()
logging.debug('Authenticated as API key=%r', api_key.id)
return api_key | python | def current_api_key():
"""Determines the API key for the current request.
Returns:
The ApiKey instance.
"""
if app.config.get('IGNORE_AUTH'):
return models.ApiKey(
id='anonymous_superuser',
secret='',
superuser=True)
ops = _get_api_key_ops()
api_key = ops.get()
logging.debug('Authenticated as API key=%r', api_key.id)
return api_key | [
"def",
"current_api_key",
"(",
")",
":",
"if",
"app",
".",
"config",
".",
"get",
"(",
"'IGNORE_AUTH'",
")",
":",
"return",
"models",
".",
"ApiKey",
"(",
"id",
"=",
"'anonymous_superuser'",
",",
"secret",
"=",
"''",
",",
"superuser",
"=",
"True",
")",
"ops",
"=",
"_get_api_key_ops",
"(",
")",
"api_key",
"=",
"ops",
".",
"get",
"(",
")",
"logging",
".",
"debug",
"(",
"'Authenticated as API key=%r'",
",",
"api_key",
".",
"id",
")",
"return",
"api_key"
]
| Determines the API key for the current request.
Returns:
The ApiKey instance. | [
"Determines",
"the",
"API",
"key",
"for",
"the",
"current",
"request",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/auth.py#L283-L299 | train |
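A sketch of using the helper inside a JSON endpoint; the route is hypothetical.

@app.route('/api/whoami')
def whoami():
    # Responds 401 with a WWW-Authenticate challenge when no basic-auth
    # header is present (via _get_api_key_ops); otherwise echoes the key.
    key = current_api_key()
    return flask.jsonify(key_id=key.id, superuser=key.superuser)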
bslatkin/dpxdt | dpxdt/server/auth.py | can_api_key_access_build | def can_api_key_access_build(param_name):
"""Determines if the current API key can access the build in the request.
Args:
param_name: Parameter name to use for getting the build ID from the
request. Will fetch from GET or POST requests.
Returns:
(api_key, build) The API Key and the Build it has access to.
"""
build_id = (
request.args.get(param_name, type=int) or
request.form.get(param_name, type=int) or
request.json[param_name])
utils.jsonify_assert(build_id, 'build_id required')
if app.config.get('IGNORE_AUTH'):
api_key = models.ApiKey(
id='anonymous_superuser',
secret='',
superuser=True)
build = models.Build.query.get(build_id)
utils.jsonify_assert(build is not None, 'build must exist', 404)
else:
ops = _get_api_key_ops()
api_key, build = ops.can_access_build(build_id)
return api_key, build | python | def can_api_key_access_build(param_name):
"""Determines if the current API key can access the build in the request.
Args:
param_name: Parameter name to use for getting the build ID from the
request. Will fetch from GET or POST requests.
Returns:
(api_key, build) The API Key and the Build it has access to.
"""
build_id = (
request.args.get(param_name, type=int) or
request.form.get(param_name, type=int) or
request.json[param_name])
utils.jsonify_assert(build_id, 'build_id required')
if app.config.get('IGNORE_AUTH'):
api_key = models.ApiKey(
id='anonymous_superuser',
secret='',
superuser=True)
build = models.Build.query.get(build_id)
utils.jsonify_assert(build is not None, 'build must exist', 404)
else:
ops = _get_api_key_ops()
api_key, build = ops.can_access_build(build_id)
return api_key, build | [
"def",
"can_api_key_access_build",
"(",
"param_name",
")",
":",
"build_id",
"=",
"(",
"request",
".",
"args",
".",
"get",
"(",
"param_name",
",",
"type",
"=",
"int",
")",
"or",
"request",
".",
"form",
".",
"get",
"(",
"param_name",
",",
"type",
"=",
"int",
")",
"or",
"request",
".",
"json",
"[",
"param_name",
"]",
")",
"utils",
".",
"jsonify_assert",
"(",
"build_id",
",",
"'build_id required'",
")",
"if",
"app",
".",
"config",
".",
"get",
"(",
"'IGNORE_AUTH'",
")",
":",
"api_key",
"=",
"models",
".",
"ApiKey",
"(",
"id",
"=",
"'anonymous_superuser'",
",",
"secret",
"=",
"''",
",",
"superuser",
"=",
"True",
")",
"build",
"=",
"models",
".",
"Build",
".",
"query",
".",
"get",
"(",
"build_id",
")",
"utils",
".",
"jsonify_assert",
"(",
"build",
"is",
"not",
"None",
",",
"'build must exist'",
",",
"404",
")",
"else",
":",
"ops",
"=",
"_get_api_key_ops",
"(",
")",
"api_key",
",",
"build",
"=",
"ops",
".",
"can_access_build",
"(",
"build_id",
")",
"return",
"api_key",
",",
"build"
]
| Determines if the current API key can access the build in the request.
Args:
param_name: Parameter name to use for getting the build ID from the
request. Will fetch from GET or POST requests.
Returns:
(api_key, build) The API Key and the Build it has access to. | [
"Determines",
"if",
"the",
"current",
"API",
"key",
"can",
"access",
"the",
"build",
"in",
"the",
"request",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/auth.py#L302-L329 | train |
bslatkin/dpxdt | dpxdt/server/auth.py | build_api_access_required | def build_api_access_required(f):
"""Decorator ensures API key has access to the build ID in the request.
Stores the API key and models.Build entity on flask.g (as g.api_key
and g.build) before calling the given function.
"""
@functools.wraps(f)
def wrapped(*args, **kwargs):
g.api_key, g.build = can_api_key_access_build('build_id')
return f(*args, **kwargs)
return wrapped | python | def build_api_access_required(f):
"""Decorator ensures API key has access to the build ID in the request.
Stores the API key and models.Build entity on flask.g (as g.api_key
and g.build) before calling the given function.
"""
@functools.wraps(f)
def wrapped(*args, **kwargs):
g.api_key, g.build = can_api_key_access_build('build_id')
return f(*args, **kwargs)
return wrapped | [
"def",
"build_api_access_required",
"(",
"f",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"g",
".",
"api_key",
",",
"g",
".",
"build",
"=",
"can_api_key_access_build",
"(",
"'build_id'",
")",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapped"
]
| Decorator ensures API key has access to the build ID in the request.
Stores the API key and models.Build entity on flask.g (as g.api_key
and g.build) before calling the given function. | [
"Decorator",
"ensures",
"API",
"key",
"has",
"access",
"to",
"the",
"build",
"ID",
"in",
"the",
"request",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/auth.py#L332-L342 | train |
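A usage sketch; the route and response fields are hypothetical. The decorator reads the 'build_id' parameter and populates flask.g before the view body runs.

@app.route('/api/run_status', methods=['POST'])
@build_api_access_required
def run_status():
    return flask.jsonify(build_id=g.build.id, key_id=g.api_key.id)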
bslatkin/dpxdt | dpxdt/server/auth.py | superuser_api_key_required | def superuser_api_key_required(f):
"""Decorator ensures only superuser API keys can request this function."""
@functools.wraps(f)
def wrapped(*args, **kwargs):
api_key = current_api_key()
g.api_key = api_key
utils.jsonify_assert(
api_key.superuser,
'API key=%r must be a super user' % api_key.id,
403)
return f(*args, **kwargs)
return wrapped | python | def superuser_api_key_required(f):
"""Decorator ensures only superuser API keys can request this function."""
@functools.wraps(f)
def wrapped(*args, **kwargs):
api_key = current_api_key()
g.api_key = api_key
utils.jsonify_assert(
api_key.superuser,
'API key=%r must be a super user' % api_key.id,
403)
return f(*args, **kwargs)
return wrapped | [
"def",
"superuser_api_key_required",
"(",
"f",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"api_key",
"=",
"current_api_key",
"(",
")",
"g",
".",
"api_key",
"=",
"api_key",
"utils",
".",
"jsonify_assert",
"(",
"api_key",
".",
"superuser",
",",
"'API key=%r must be a super user'",
"%",
"api_key",
".",
"id",
",",
"403",
")",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapped"
]
| Decorator ensures only superuser API keys can request this function. | [
"Decorator",
"ensures",
"only",
"superuser",
"API",
"keys",
"can",
"request",
"this",
"function",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/auth.py#L345-L359 | train |
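A usage sketch with a hypothetical route; non-superuser keys never reach the view body because utils.jsonify_assert in the decorator raises a 403 JSON error first.

@app.route('/api/purge_caches', methods=['POST'])
@superuser_api_key_required
def purge_caches():
    return flask.jsonify(success=True)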
bslatkin/dpxdt | dpxdt/server/auth.py | manage_api_keys | def manage_api_keys():
"""Page for viewing and creating API keys."""
build = g.build
create_form = forms.CreateApiKeyForm()
if create_form.validate_on_submit():
api_key = models.ApiKey()
create_form.populate_obj(api_key)
api_key.id = utils.human_uuid()
api_key.secret = utils.password_uuid()
save_admin_log(build, created_api_key=True, message=api_key.id)
db.session.add(api_key)
db.session.commit()
logging.info('Created API key=%r for build_id=%r',
api_key.id, build.id)
return redirect(url_for('manage_api_keys', build_id=build.id))
create_form.build_id.data = build.id
api_key_query = (
models.ApiKey.query
.filter_by(build_id=build.id)
.order_by(models.ApiKey.created.desc())
.limit(1000))
revoke_form_list = []
for api_key in api_key_query:
form = forms.RevokeApiKeyForm()
form.id.data = api_key.id
form.build_id.data = build.id
form.revoke.data = True
revoke_form_list.append((api_key, form))
return render_template(
'view_api_keys.html',
build=build,
create_form=create_form,
revoke_form_list=revoke_form_list) | python | def manage_api_keys():
"""Page for viewing and creating API keys."""
build = g.build
create_form = forms.CreateApiKeyForm()
if create_form.validate_on_submit():
api_key = models.ApiKey()
create_form.populate_obj(api_key)
api_key.id = utils.human_uuid()
api_key.secret = utils.password_uuid()
save_admin_log(build, created_api_key=True, message=api_key.id)
db.session.add(api_key)
db.session.commit()
logging.info('Created API key=%r for build_id=%r',
api_key.id, build.id)
return redirect(url_for('manage_api_keys', build_id=build.id))
create_form.build_id.data = build.id
api_key_query = (
models.ApiKey.query
.filter_by(build_id=build.id)
.order_by(models.ApiKey.created.desc())
.limit(1000))
revoke_form_list = []
for api_key in api_key_query:
form = forms.RevokeApiKeyForm()
form.id.data = api_key.id
form.build_id.data = build.id
form.revoke.data = True
revoke_form_list.append((api_key, form))
return render_template(
'view_api_keys.html',
build=build,
create_form=create_form,
revoke_form_list=revoke_form_list) | [
"def",
"manage_api_keys",
"(",
")",
":",
"build",
"=",
"g",
".",
"build",
"create_form",
"=",
"forms",
".",
"CreateApiKeyForm",
"(",
")",
"if",
"create_form",
".",
"validate_on_submit",
"(",
")",
":",
"api_key",
"=",
"models",
".",
"ApiKey",
"(",
")",
"create_form",
".",
"populate_obj",
"(",
"api_key",
")",
"api_key",
".",
"id",
"=",
"utils",
".",
"human_uuid",
"(",
")",
"api_key",
".",
"secret",
"=",
"utils",
".",
"password_uuid",
"(",
")",
"save_admin_log",
"(",
"build",
",",
"created_api_key",
"=",
"True",
",",
"message",
"=",
"api_key",
".",
"id",
")",
"db",
".",
"session",
".",
"add",
"(",
"api_key",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"logging",
".",
"info",
"(",
"'Created API key=%r for build_id=%r'",
",",
"api_key",
".",
"id",
",",
"build",
".",
"id",
")",
"return",
"redirect",
"(",
"url_for",
"(",
"'manage_api_keys'",
",",
"build_id",
"=",
"build",
".",
"id",
")",
")",
"create_form",
".",
"build_id",
".",
"data",
"=",
"build",
".",
"id",
"api_key_query",
"=",
"(",
"models",
".",
"ApiKey",
".",
"query",
".",
"filter_by",
"(",
"build_id",
"=",
"build",
".",
"id",
")",
".",
"order_by",
"(",
"models",
".",
"ApiKey",
".",
"created",
".",
"desc",
"(",
")",
")",
".",
"limit",
"(",
"1000",
")",
")",
"revoke_form_list",
"=",
"[",
"]",
"for",
"api_key",
"in",
"api_key_query",
":",
"form",
"=",
"forms",
".",
"RevokeApiKeyForm",
"(",
")",
"form",
".",
"id",
".",
"data",
"=",
"api_key",
".",
"id",
"form",
".",
"build_id",
".",
"data",
"=",
"build",
".",
"id",
"form",
".",
"revoke",
".",
"data",
"=",
"True",
"revoke_form_list",
".",
"append",
"(",
"(",
"api_key",
",",
"form",
")",
")",
"return",
"render_template",
"(",
"'view_api_keys.html'",
",",
"build",
"=",
"build",
",",
"create_form",
"=",
"create_form",
",",
"revoke_form_list",
"=",
"revoke_form_list",
")"
]
| Page for viewing and creating API keys. | [
"Page",
"for",
"viewing",
"and",
"creating",
"API",
"keys",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/auth.py#L365-L404 | train |
bslatkin/dpxdt | dpxdt/server/auth.py | revoke_api_key | def revoke_api_key():
"""Form submission handler for revoking API keys."""
build = g.build
form = forms.RevokeApiKeyForm()
if form.validate_on_submit():
api_key = models.ApiKey.query.get(form.id.data)
if api_key.build_id != build.id:
logging.debug('User does not have access to API key=%r',
api_key.id)
abort(403)
api_key.active = False
save_admin_log(build, revoked_api_key=True, message=api_key.id)
db.session.add(api_key)
db.session.commit()
ops = operations.ApiKeyOps(api_key.id, api_key.secret)
ops.evict()
return redirect(url_for('manage_api_keys', build_id=build.id)) | python | def revoke_api_key():
"""Form submission handler for revoking API keys."""
build = g.build
form = forms.RevokeApiKeyForm()
if form.validate_on_submit():
api_key = models.ApiKey.query.get(form.id.data)
if api_key.build_id != build.id:
logging.debug('User does not have access to API key=%r',
api_key.id)
abort(403)
api_key.active = False
save_admin_log(build, revoked_api_key=True, message=api_key.id)
db.session.add(api_key)
db.session.commit()
ops = operations.ApiKeyOps(api_key.id, api_key.secret)
ops.evict()
return redirect(url_for('manage_api_keys', build_id=build.id)) | [
"def",
"revoke_api_key",
"(",
")",
":",
"build",
"=",
"g",
".",
"build",
"form",
"=",
"forms",
".",
"RevokeApiKeyForm",
"(",
")",
"if",
"form",
".",
"validate_on_submit",
"(",
")",
":",
"api_key",
"=",
"models",
".",
"ApiKey",
".",
"query",
".",
"get",
"(",
"form",
".",
"id",
".",
"data",
")",
"if",
"api_key",
".",
"build_id",
"!=",
"build",
".",
"id",
":",
"logging",
".",
"debug",
"(",
"'User does not have access to API key=%r'",
",",
"api_key",
".",
"id",
")",
"abort",
"(",
"403",
")",
"api_key",
".",
"active",
"=",
"False",
"save_admin_log",
"(",
"build",
",",
"revoked_api_key",
"=",
"True",
",",
"message",
"=",
"api_key",
".",
"id",
")",
"db",
".",
"session",
".",
"add",
"(",
"api_key",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"ops",
"=",
"operations",
".",
"ApiKeyOps",
"(",
"api_key",
".",
"id",
",",
"api_key",
".",
"secret",
")",
"ops",
".",
"evict",
"(",
")",
"return",
"redirect",
"(",
"url_for",
"(",
"'manage_api_keys'",
",",
"build_id",
"=",
"build",
".",
"id",
")",
")"
]
| Form submission handler for revoking API keys. | [
"Form",
"submission",
"handler",
"for",
"revoking",
"API",
"keys",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/auth.py#L410-L430 | train |
bslatkin/dpxdt | dpxdt/server/auth.py | claim_invitations | def claim_invitations(user):
"""Claims any pending invitations for the given user's email address."""
# See if there are any build invitations present for the user with this
# email address. If so, replace all those invitations with the real user.
invitation_user_id = '%s:%s' % (
models.User.EMAIL_INVITATION, user.email_address)
invitation_user = models.User.query.get(invitation_user_id)
if invitation_user:
invited_build_list = list(invitation_user.builds)
if not invited_build_list:
return
db.session.add(user)
logging.debug('Found %d build admin invitations for id=%r, user=%r',
len(invited_build_list), invitation_user_id, user)
for build in invited_build_list:
build.owners.remove(invitation_user)
if not build.is_owned_by(user.id):
build.owners.append(user)
logging.debug('Claiming invitation for build_id=%r', build.id)
save_admin_log(build, invite_accepted=True)
else:
logging.debug('User already owner of build. '
'id=%r, build_id=%r', user.id, build.id)
db.session.add(build)
db.session.delete(invitation_user)
db.session.commit()
# Re-add the user to the current session so we can query with it.
db.session.add(current_user) | python | def claim_invitations(user):
"""Claims any pending invitations for the given user's email address."""
# See if there are any build invitations present for the user with this
# email address. If so, replace all those invitations with the real user.
invitation_user_id = '%s:%s' % (
models.User.EMAIL_INVITATION, user.email_address)
invitation_user = models.User.query.get(invitation_user_id)
if invitation_user:
invited_build_list = list(invitation_user.builds)
if not invited_build_list:
return
db.session.add(user)
logging.debug('Found %d build admin invitations for id=%r, user=%r',
len(invited_build_list), invitation_user_id, user)
for build in invited_build_list:
build.owners.remove(invitation_user)
if not build.is_owned_by(user.id):
build.owners.append(user)
logging.debug('Claiming invitation for build_id=%r', build.id)
save_admin_log(build, invite_accepted=True)
else:
logging.debug('User already owner of build. '
'id=%r, build_id=%r', user.id, build.id)
db.session.add(build)
db.session.delete(invitation_user)
db.session.commit()
# Re-add the user to the current session so we can query with it.
db.session.add(current_user) | [
"def",
"claim_invitations",
"(",
"user",
")",
":",
"# See if there are any build invitations present for the user with this",
"# email address. If so, replace all those invitations with the real user.",
"invitation_user_id",
"=",
"'%s:%s'",
"%",
"(",
"models",
".",
"User",
".",
"EMAIL_INVITATION",
",",
"user",
".",
"email_address",
")",
"invitation_user",
"=",
"models",
".",
"User",
".",
"query",
".",
"get",
"(",
"invitation_user_id",
")",
"if",
"invitation_user",
":",
"invited_build_list",
"=",
"list",
"(",
"invitation_user",
".",
"builds",
")",
"if",
"not",
"invited_build_list",
":",
"return",
"db",
".",
"session",
".",
"add",
"(",
"user",
")",
"logging",
".",
"debug",
"(",
"'Found %d build admin invitations for id=%r, user=%r'",
",",
"len",
"(",
"invited_build_list",
")",
",",
"invitation_user_id",
",",
"user",
")",
"for",
"build",
"in",
"invited_build_list",
":",
"build",
".",
"owners",
".",
"remove",
"(",
"invitation_user",
")",
"if",
"not",
"build",
".",
"is_owned_by",
"(",
"user",
".",
"id",
")",
":",
"build",
".",
"owners",
".",
"append",
"(",
"user",
")",
"logging",
".",
"debug",
"(",
"'Claiming invitation for build_id=%r'",
",",
"build",
".",
"id",
")",
"save_admin_log",
"(",
"build",
",",
"invite_accepted",
"=",
"True",
")",
"else",
":",
"logging",
".",
"debug",
"(",
"'User already owner of build. '",
"'id=%r, build_id=%r'",
",",
"user",
".",
"id",
",",
"build",
".",
"id",
")",
"db",
".",
"session",
".",
"add",
"(",
"build",
")",
"db",
".",
"session",
".",
"delete",
"(",
"invitation_user",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"# Re-add the user to the current session so we can query with it.",
"db",
".",
"session",
".",
"add",
"(",
"current_user",
")"
]
| Claims any pending invitations for the given user's email address. | [
"Claims",
"any",
"pending",
"invitations",
"for",
"the",
"given",
"user",
"s",
"email",
"address",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/auth.py#L433-L463 | train |
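A sketch of the placeholder-user convention the function relies on; the email address is illustrative.

# Invitations are stored as User rows keyed '<EMAIL_INVITATION>:<email>'
# until someone signs in with that address and claim_invitations() moves
# the build ownerships over to the real account.
pending_id = '%s:%s' % (models.User.EMAIL_INVITATION, 'dev@example.com')
pending = models.User.query.get(pending_id)  # None once claimed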
bslatkin/dpxdt | dpxdt/server/auth.py | manage_admins | def manage_admins():
"""Page for viewing and managing build admins."""
build = g.build
# Do not show cached data
db.session.add(build)
db.session.refresh(build)
add_form = forms.AddAdminForm()
if add_form.validate_on_submit():
invitation_user_id = '%s:%s' % (
models.User.EMAIL_INVITATION, add_form.email_address.data)
invitation_user = models.User.query.get(invitation_user_id)
if not invitation_user:
invitation_user = models.User(
id=invitation_user_id,
email_address=add_form.email_address.data)
db.session.add(invitation_user)
db.session.add(build)
db.session.add(invitation_user)
db.session.refresh(build, lockmode='update')
build.owners.append(invitation_user)
save_admin_log(build, invited_new_admin=True,
message=invitation_user.email_address)
db.session.commit()
logging.info('Added user=%r as owner to build_id=%r',
invitation_user.id, build.id)
return redirect(url_for('manage_admins', build_id=build.id))
add_form.build_id.data = build.id
revoke_form_list = []
for user in build.owners:
form = forms.RemoveAdminForm()
form.user_id.data = user.id
form.build_id.data = build.id
form.revoke.data = True
revoke_form_list.append((user, form))
return render_template(
'view_admins.html',
build=build,
add_form=add_form,
revoke_form_list=revoke_form_list) | python | def manage_admins():
"""Page for viewing and managing build admins."""
build = g.build
# Do not show cached data
db.session.add(build)
db.session.refresh(build)
add_form = forms.AddAdminForm()
if add_form.validate_on_submit():
invitation_user_id = '%s:%s' % (
models.User.EMAIL_INVITATION, add_form.email_address.data)
invitation_user = models.User.query.get(invitation_user_id)
if not invitation_user:
invitation_user = models.User(
id=invitation_user_id,
email_address=add_form.email_address.data)
db.session.add(invitation_user)
db.session.add(build)
db.session.add(invitation_user)
db.session.refresh(build, lockmode='update')
build.owners.append(invitation_user)
save_admin_log(build, invited_new_admin=True,
message=invitation_user.email_address)
db.session.commit()
logging.info('Added user=%r as owner to build_id=%r',
invitation_user.id, build.id)
return redirect(url_for('manage_admins', build_id=build.id))
add_form.build_id.data = build.id
revoke_form_list = []
for user in build.owners:
form = forms.RemoveAdminForm()
form.user_id.data = user.id
form.build_id.data = build.id
form.revoke.data = True
revoke_form_list.append((user, form))
return render_template(
'view_admins.html',
build=build,
add_form=add_form,
revoke_form_list=revoke_form_list) | [
"def",
"manage_admins",
"(",
")",
":",
"build",
"=",
"g",
".",
"build",
"# Do not show cached data",
"db",
".",
"session",
".",
"add",
"(",
"build",
")",
"db",
".",
"session",
".",
"refresh",
"(",
"build",
")",
"add_form",
"=",
"forms",
".",
"AddAdminForm",
"(",
")",
"if",
"add_form",
".",
"validate_on_submit",
"(",
")",
":",
"invitation_user_id",
"=",
"'%s:%s'",
"%",
"(",
"models",
".",
"User",
".",
"EMAIL_INVITATION",
",",
"add_form",
".",
"email_address",
".",
"data",
")",
"invitation_user",
"=",
"models",
".",
"User",
".",
"query",
".",
"get",
"(",
"invitation_user_id",
")",
"if",
"not",
"invitation_user",
":",
"invitation_user",
"=",
"models",
".",
"User",
"(",
"id",
"=",
"invitation_user_id",
",",
"email_address",
"=",
"add_form",
".",
"email_address",
".",
"data",
")",
"db",
".",
"session",
".",
"add",
"(",
"invitation_user",
")",
"db",
".",
"session",
".",
"add",
"(",
"build",
")",
"db",
".",
"session",
".",
"add",
"(",
"invitation_user",
")",
"db",
".",
"session",
".",
"refresh",
"(",
"build",
",",
"lockmode",
"=",
"'update'",
")",
"build",
".",
"owners",
".",
"append",
"(",
"invitation_user",
")",
"save_admin_log",
"(",
"build",
",",
"invited_new_admin",
"=",
"True",
",",
"message",
"=",
"invitation_user",
".",
"email_address",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"logging",
".",
"info",
"(",
"'Added user=%r as owner to build_id=%r'",
",",
"invitation_user",
".",
"id",
",",
"build",
".",
"id",
")",
"return",
"redirect",
"(",
"url_for",
"(",
"'manage_admins'",
",",
"build_id",
"=",
"build",
".",
"id",
")",
")",
"add_form",
".",
"build_id",
".",
"data",
"=",
"build",
".",
"id",
"revoke_form_list",
"=",
"[",
"]",
"for",
"user",
"in",
"build",
".",
"owners",
":",
"form",
"=",
"forms",
".",
"RemoveAdminForm",
"(",
")",
"form",
".",
"user_id",
".",
"data",
"=",
"user",
".",
"id",
"form",
".",
"build_id",
".",
"data",
"=",
"build",
".",
"id",
"form",
".",
"revoke",
".",
"data",
"=",
"True",
"revoke_form_list",
".",
"append",
"(",
"(",
"user",
",",
"form",
")",
")",
"return",
"render_template",
"(",
"'view_admins.html'",
",",
"build",
"=",
"build",
",",
"add_form",
"=",
"add_form",
",",
"revoke_form_list",
"=",
"revoke_form_list",
")"
]
| Page for viewing and managing build admins. | [
"Page",
"for",
"viewing",
"and",
"managing",
"build",
"admins",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/auth.py#L469-L518 | train |
bslatkin/dpxdt | dpxdt/server/auth.py | revoke_admin | def revoke_admin():
"""Form submission handler for revoking admin access to a build."""
build = g.build
form = forms.RemoveAdminForm()
if form.validate_on_submit():
user = models.User.query.get(form.user_id.data)
if not user:
logging.debug('User being revoked admin access does not exist. '
'id=%r, build_id=%r', form.user_id.data, build.id)
abort(400)
if user == current_user:
logging.debug('User trying to remove themself as admin. '
'id=%r, build_id=%r', user.id, build.id)
abort(400)
db.session.add(build)
db.session.add(user)
db.session.refresh(build, lockmode='update')
db.session.refresh(user, lockmode='update')
user_is_owner = build.owners.filter_by(id=user.id).first()
if not user_is_owner:
logging.debug('User being revoked admin access is not owner. '
'id=%r, build_id=%r.', user.id, build.id)
abort(400)
build.owners.remove(user)
save_admin_log(build, revoked_admin=True, message=user.email_address)
db.session.commit()
operations.UserOps(user.get_id()).evict()
return redirect(url_for('manage_admins', build_id=build.id)) | python | def revoke_admin():
"""Form submission handler for revoking admin access to a build."""
build = g.build
form = forms.RemoveAdminForm()
if form.validate_on_submit():
user = models.User.query.get(form.user_id.data)
if not user:
logging.debug('User being revoked admin access does not exist. '
'id=%r, build_id=%r', form.user_id.data, build.id)
abort(400)
if user == current_user:
logging.debug('User trying to remove themself as admin. '
'id=%r, build_id=%r', user.id, build.id)
abort(400)
db.session.add(build)
db.session.add(user)
db.session.refresh(build, lockmode='update')
db.session.refresh(user, lockmode='update')
user_is_owner = build.owners.filter_by(id=user.id).first()
if not user_is_owner:
logging.debug('User being revoked admin access is not owner. '
'id=%r, build_id=%r.', user.id, build.id)
abort(400)
build.owners.remove(user)
save_admin_log(build, revoked_admin=True, message=user.email_address)
db.session.commit()
operations.UserOps(user.get_id()).evict()
return redirect(url_for('manage_admins', build_id=build.id)) | [
"def",
"revoke_admin",
"(",
")",
":",
"build",
"=",
"g",
".",
"build",
"form",
"=",
"forms",
".",
"RemoveAdminForm",
"(",
")",
"if",
"form",
".",
"validate_on_submit",
"(",
")",
":",
"user",
"=",
"models",
".",
"User",
".",
"query",
".",
"get",
"(",
"form",
".",
"user_id",
".",
"data",
")",
"if",
"not",
"user",
":",
"logging",
".",
"debug",
"(",
"'User being revoked admin access does not exist.'",
"'id=%r, build_id=%r'",
",",
"form",
".",
"user_id",
".",
"data",
",",
"build",
".",
"id",
")",
"abort",
"(",
"400",
")",
"if",
"user",
"==",
"current_user",
":",
"logging",
".",
"debug",
"(",
"'User trying to remove themself as admin. '",
"'id=%r, build_id=%r'",
",",
"user",
".",
"id",
",",
"build",
".",
"id",
")",
"abort",
"(",
"400",
")",
"db",
".",
"session",
".",
"add",
"(",
"build",
")",
"db",
".",
"session",
".",
"add",
"(",
"user",
")",
"db",
".",
"session",
".",
"refresh",
"(",
"build",
",",
"lockmode",
"=",
"'update'",
")",
"db",
".",
"session",
".",
"refresh",
"(",
"user",
",",
"lockmode",
"=",
"'update'",
")",
"user_is_owner",
"=",
"build",
".",
"owners",
".",
"filter_by",
"(",
"id",
"=",
"user",
".",
"id",
")",
"if",
"not",
"user_is_owner",
":",
"logging",
".",
"debug",
"(",
"'User being revoked admin access is not owner. '",
"'id=%r, build_id=%r.'",
",",
"user",
".",
"id",
",",
"build",
".",
"id",
")",
"abort",
"(",
"400",
")",
"build",
".",
"owners",
".",
"remove",
"(",
"user",
")",
"save_admin_log",
"(",
"build",
",",
"revoked_admin",
"=",
"True",
",",
"message",
"=",
"user",
".",
"email_address",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"operations",
".",
"UserOps",
"(",
"user",
".",
"get_id",
"(",
")",
")",
".",
"evict",
"(",
")",
"return",
"redirect",
"(",
"url_for",
"(",
"'manage_admins'",
",",
"build_id",
"=",
"build",
".",
"id",
")",
")"
]
| Form submission handler for revoking admin access to a build. | [
"Form",
"submission",
"handler",
"for",
"revoking",
"admin",
"access",
"to",
"a",
"build",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/auth.py#L524-L558 | train |
bslatkin/dpxdt | dpxdt/server/auth.py | save_admin_log | def save_admin_log(build, **kwargs):
"""Saves an action to the admin log."""
message = kwargs.pop('message', None)
release = kwargs.pop('release', None)
run = kwargs.pop('run', None)
if not len(kwargs) == 1:
raise TypeError('Must specify a LOG_TYPE argument')
log_enum = kwargs.keys()[0]
log_type = getattr(models.AdminLog, log_enum.upper(), None)
if not log_type:
raise TypeError('Bad log_type argument: %s' % log_enum)
if current_user.is_anonymous():
user_id = None
else:
user_id = current_user.get_id()
log = models.AdminLog(
build_id=build.id,
log_type=log_type,
message=message,
user_id=user_id)
if release:
log.release_id = release.id
if run:
log.run_id = run.id
log.release_id = run.release_id
db.session.add(log) | python | def save_admin_log(build, **kwargs):
"""Saves an action to the admin log."""
message = kwargs.pop('message', None)
release = kwargs.pop('release', None)
run = kwargs.pop('run', None)
if not len(kwargs) == 1:
raise TypeError('Must specify a LOG_TYPE argument')
log_enum = kwargs.keys()[0]
log_type = getattr(models.AdminLog, log_enum.upper(), None)
if not log_type:
raise TypeError('Bad log_type argument: %s' % log_enum)
if current_user.is_anonymous():
user_id = None
else:
user_id = current_user.get_id()
log = models.AdminLog(
build_id=build.id,
log_type=log_type,
message=message,
user_id=user_id)
if release:
log.release_id = release.id
if run:
log.run_id = run.id
log.release_id = run.release_id
db.session.add(log) | [
"def",
"save_admin_log",
"(",
"build",
",",
"*",
"*",
"kwargs",
")",
":",
"message",
"=",
"kwargs",
".",
"pop",
"(",
"'message'",
",",
"None",
")",
"release",
"=",
"kwargs",
".",
"pop",
"(",
"'release'",
",",
"None",
")",
"run",
"=",
"kwargs",
".",
"pop",
"(",
"'run'",
",",
"None",
")",
"if",
"not",
"len",
"(",
"kwargs",
")",
"==",
"1",
":",
"raise",
"TypeError",
"(",
"'Must specify a LOG_TYPE argument'",
")",
"log_enum",
"=",
"kwargs",
".",
"keys",
"(",
")",
"[",
"0",
"]",
"log_type",
"=",
"getattr",
"(",
"models",
".",
"AdminLog",
",",
"log_enum",
".",
"upper",
"(",
")",
",",
"None",
")",
"if",
"not",
"log_type",
":",
"raise",
"TypeError",
"(",
"'Bad log_type argument: %s'",
"%",
"log_enum",
")",
"if",
"current_user",
".",
"is_anonymous",
"(",
")",
":",
"user_id",
"=",
"None",
"else",
":",
"user_id",
"=",
"current_user",
".",
"get_id",
"(",
")",
"log",
"=",
"models",
".",
"AdminLog",
"(",
"build_id",
"=",
"build",
".",
"id",
",",
"log_type",
"=",
"log_type",
",",
"message",
"=",
"message",
",",
"user_id",
"=",
"user_id",
")",
"if",
"release",
":",
"log",
".",
"release_id",
"=",
"release",
".",
"id",
"if",
"run",
":",
"log",
".",
"run_id",
"=",
"run",
".",
"id",
"log",
".",
"release_id",
"=",
"run",
".",
"release_id",
"db",
".",
"session",
".",
"add",
"(",
"log",
")"
]
| Saves an action to the admin log. | [
"Saves",
"an",
"action",
"to",
"the",
"admin",
"log",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/auth.py#L561-L593 | train |
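A calling sketch: the single extra keyword argument names the models.AdminLog constant to record, mirroring the created_api_key call in manage_api_keys above. The helper only stages the row, so the caller commits.

save_admin_log(build, created_api_key=True, message=api_key.id)
db.session.commit()  # save_admin_log() itself never commits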
bslatkin/dpxdt | dpxdt/server/auth.py | view_admin_log | def view_admin_log():
"""Page for viewing the log of admin activity."""
build = g.build
# TODO: Add paging
log_list = (
models.AdminLog.query
.filter_by(build_id=build.id)
.order_by(models.AdminLog.created.desc())
.all())
return render_template(
'view_admin_log.html',
build=build,
log_list=log_list) | python | def view_admin_log():
"""Page for viewing the log of admin activity."""
build = g.build
# TODO: Add paging
log_list = (
models.AdminLog.query
.filter_by(build_id=build.id)
.order_by(models.AdminLog.created.desc())
.all())
return render_template(
'view_admin_log.html',
build=build,
log_list=log_list) | [
"def",
"view_admin_log",
"(",
")",
":",
"build",
"=",
"g",
".",
"build",
"# TODO: Add paging",
"log_list",
"=",
"(",
"models",
".",
"AdminLog",
".",
"query",
".",
"filter_by",
"(",
"build_id",
"=",
"build",
".",
"id",
")",
".",
"order_by",
"(",
"models",
".",
"AdminLog",
".",
"created",
".",
"desc",
"(",
")",
")",
".",
"all",
"(",
")",
")",
"return",
"render_template",
"(",
"'view_admin_log.html'",
",",
"build",
"=",
"build",
",",
"log_list",
"=",
"log_list",
")"
]
| Page for viewing the log of admin activity. | [
"Page",
"for",
"viewing",
"the",
"log",
"of",
"admin",
"activity",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/auth.py#L599-L614 | train |
bslatkin/dpxdt | dpxdt/client/utils.py | verify_binary | def verify_binary(flag_name, process_args=None):
"""Exits the program if the binary from the given flag doesn't run.
Args:
flag_name: Name of the flag that should be the path to the binary.
process_args: Args to pass to the binary to do nothing but verify
that it's working correctly (something like "--version" is good).
Optional. Defaults to no args.
Raises:
SystemExit with error if the process did not work.
"""
if process_args is None:
process_args = []
path = getattr(FLAGS, flag_name)
if not path:
logging.error('Flag %r not set' % flag_name)
sys.exit(1)
with open(os.devnull, 'w') as dev_null:
try:
subprocess.check_call(
[path] + process_args,
stdout=dev_null,
stderr=subprocess.STDOUT)
except:
logging.exception('--%s binary at path %r does not work',
flag_name, path)
sys.exit(1) | python | def verify_binary(flag_name, process_args=None):
"""Exits the program if the binary from the given flag doesn't run.
Args:
flag_name: Name of the flag that should be the path to the binary.
process_args: Args to pass to the binary to do nothing but verify
that it's working correctly (something like "--version" is good).
Optional. Defaults to no args.
Raises:
SystemExit with error if the process did not work.
"""
if process_args is None:
process_args = []
path = getattr(FLAGS, flag_name)
if not path:
logging.error('Flag %r not set' % flag_name)
sys.exit(1)
with open(os.devnull, 'w') as dev_null:
try:
subprocess.check_call(
[path] + process_args,
stdout=dev_null,
stderr=subprocess.STDOUT)
except:
logging.exception('--%s binary at path %r does not work',
flag_name, path)
sys.exit(1) | [
"def",
"verify_binary",
"(",
"flag_name",
",",
"process_args",
"=",
"None",
")",
":",
"if",
"process_args",
"is",
"None",
":",
"process_args",
"=",
"[",
"]",
"path",
"=",
"getattr",
"(",
"FLAGS",
",",
"flag_name",
")",
"if",
"not",
"path",
":",
"logging",
".",
"error",
"(",
"'Flag %r not set'",
"%",
"flag_name",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"with",
"open",
"(",
"os",
".",
"devnull",
",",
"'w'",
")",
"as",
"dev_null",
":",
"try",
":",
"subprocess",
".",
"check_call",
"(",
"[",
"path",
"]",
"+",
"process_args",
",",
"stdout",
"=",
"dev_null",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
")",
"except",
":",
"logging",
".",
"exception",
"(",
"'--%s binary at path %r does not work'",
",",
"flag_name",
",",
"path",
")",
"sys",
".",
"exit",
"(",
"1",
")"
]
| Exits the program if the binary from the given flag doesn't run.
Args:
flag_name: Name of the flag that should be the path to the binary.
process_args: Args to pass to the binary to do nothing but verify
that it's working correctly (something like "--version" is good).
Optional. Defaults to no args.
Raises:
SystemExit with error if the process did not work. | [
"Exits",
"the",
"program",
"if",
"the",
"binary",
"from",
"the",
"given",
"flag",
"doesn",
"t",
"run",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/client/utils.py#L28-L57 | train |
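A startup sketch assuming the module's FLAGS object comes from python-gflags; the flag name and default path are hypothetical.

import gflags
FLAGS = gflags.FLAGS

gflags.DEFINE_string('capture_binary', '/usr/bin/phantomjs',
                     'Path to the screenshot capture binary')

def startup_checks():
    # Exits via sys.exit(1) if the flag is unset or the binary cannot
    # run with the probe arguments.
    verify_binary('capture_binary', ['--version'])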
bslatkin/dpxdt | dpxdt/server/api.py | create_release | def create_release():
"""Creates a new release candidate for a build."""
build = g.build
release_name = request.form.get('release_name')
utils.jsonify_assert(release_name, 'release_name required')
url = request.form.get('url')
utils.jsonify_assert(url, 'url required')
release = models.Release(
name=release_name,
url=url,
number=1,
build_id=build.id)
last_candidate = (
models.Release.query
.filter_by(build_id=build.id, name=release_name)
.order_by(models.Release.number.desc())
.first())
if last_candidate:
release.number += last_candidate.number
if last_candidate.status == models.Release.PROCESSING:
canceled_task_count = work_queue.cancel(
release_id=last_candidate.id)
logging.info('Canceling %d tasks for previous attempt '
'build_id=%r, release_name=%r, release_number=%d',
canceled_task_count, build.id, last_candidate.name,
last_candidate.number)
last_candidate.status = models.Release.BAD
db.session.add(last_candidate)
db.session.add(release)
db.session.commit()
signals.release_updated_via_api.send(app, build=build, release=release)
logging.info('Created release: build_id=%r, release_name=%r, url=%r, '
'release_number=%d', build.id, release.name,
url, release.number)
return flask.jsonify(
success=True,
build_id=build.id,
release_name=release.name,
release_number=release.number,
url=url) | python | def create_release():
"""Creates a new release candidate for a build."""
build = g.build
release_name = request.form.get('release_name')
utils.jsonify_assert(release_name, 'release_name required')
url = request.form.get('url')
utils.jsonify_assert(url, 'url required')
release = models.Release(
name=release_name,
url=url,
number=1,
build_id=build.id)
last_candidate = (
models.Release.query
.filter_by(build_id=build.id, name=release_name)
.order_by(models.Release.number.desc())
.first())
if last_candidate:
release.number += last_candidate.number
if last_candidate.status == models.Release.PROCESSING:
canceled_task_count = work_queue.cancel(
release_id=last_candidate.id)
logging.info('Canceling %d tasks for previous attempt '
'build_id=%r, release_name=%r, release_number=%d',
canceled_task_count, build.id, last_candidate.name,
last_candidate.number)
last_candidate.status = models.Release.BAD
db.session.add(last_candidate)
db.session.add(release)
db.session.commit()
signals.release_updated_via_api.send(app, build=build, release=release)
logging.info('Created release: build_id=%r, release_name=%r, url=%r, '
'release_number=%d', build.id, release.name,
url, release.number)
return flask.jsonify(
success=True,
build_id=build.id,
release_name=release.name,
release_number=release.number,
url=url) | [
"def",
"create_release",
"(",
")",
":",
"build",
"=",
"g",
".",
"build",
"release_name",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'release_name'",
")",
"utils",
".",
"jsonify_assert",
"(",
"release_name",
",",
"'release_name required'",
")",
"url",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'url'",
")",
"utils",
".",
"jsonify_assert",
"(",
"release_name",
",",
"'url required'",
")",
"release",
"=",
"models",
".",
"Release",
"(",
"name",
"=",
"release_name",
",",
"url",
"=",
"url",
",",
"number",
"=",
"1",
",",
"build_id",
"=",
"build",
".",
"id",
")",
"last_candidate",
"=",
"(",
"models",
".",
"Release",
".",
"query",
".",
"filter_by",
"(",
"build_id",
"=",
"build",
".",
"id",
",",
"name",
"=",
"release_name",
")",
".",
"order_by",
"(",
"models",
".",
"Release",
".",
"number",
".",
"desc",
"(",
")",
")",
".",
"first",
"(",
")",
")",
"if",
"last_candidate",
":",
"release",
".",
"number",
"+=",
"last_candidate",
".",
"number",
"if",
"last_candidate",
".",
"status",
"==",
"models",
".",
"Release",
".",
"PROCESSING",
":",
"canceled_task_count",
"=",
"work_queue",
".",
"cancel",
"(",
"release_id",
"=",
"last_candidate",
".",
"id",
")",
"logging",
".",
"info",
"(",
"'Canceling %d tasks for previous attempt '",
"'build_id=%r, release_name=%r, release_number=%d'",
",",
"canceled_task_count",
",",
"build",
".",
"id",
",",
"last_candidate",
".",
"name",
",",
"last_candidate",
".",
"number",
")",
"last_candidate",
".",
"status",
"=",
"models",
".",
"Release",
".",
"BAD",
"db",
".",
"session",
".",
"add",
"(",
"last_candidate",
")",
"db",
".",
"session",
".",
"add",
"(",
"release",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"signals",
".",
"release_updated_via_api",
".",
"send",
"(",
"app",
",",
"build",
"=",
"build",
",",
"release",
"=",
"release",
")",
"logging",
".",
"info",
"(",
"'Created release: build_id=%r, release_name=%r, url=%r, '",
"'release_number=%d'",
",",
"build",
".",
"id",
",",
"release",
".",
"name",
",",
"url",
",",
"release",
".",
"number",
")",
"return",
"flask",
".",
"jsonify",
"(",
"success",
"=",
"True",
",",
"build_id",
"=",
"build",
".",
"id",
",",
"release_name",
"=",
"release",
".",
"name",
",",
"release_number",
"=",
"release",
".",
"number",
",",
"url",
"=",
"url",
")"
]
| Creates a new release candidate for a build. | [
"Creates",
"a",
"new",
"release",
"candidate",
"for",
"a",
"build",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/api.py#L109-L154 | train |
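A client-side sketch of exercising this endpoint with the requests library, assuming it is routed at /api/create_release and authenticated with an API key over HTTP basic auth (see _get_api_key_ops); the host, key, and values are made up.

import requests

resp = requests.post(
    'https://dpxdt.example.com/api/create_release',  # assumed route
    auth=('my-key-id', 'my-key-secret'),             # API key basic auth
    data={'build_id': 42,
          'release_name': 'deploy-2014-01-01',
          'url': 'https://www.example.com/'})
print resp.json()['release_number']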
bslatkin/dpxdt | dpxdt/server/api.py | _check_release_done_processing | def _check_release_done_processing(release):
"""Moves a release candidate to reviewing if all runs are done."""
if release.status != models.Release.PROCESSING:
# NOTE: This statement also guards for situations where the user has
# prematurely specified that the release is good or bad. Once the user
# has done that, the system will not automatically move the release
# back into the 'reviewing' state or send the email notification below.
logging.info('Release not in processing state yet: build_id=%r, '
'name=%r, number=%d', release.build_id, release.name,
release.number)
return False
query = models.Run.query.filter_by(release_id=release.id)
for run in query:
if run.status == models.Run.NEEDS_DIFF:
# Still waiting for the diff to finish.
return False
if run.ref_config and not run.ref_image:
# Still waiting for the ref capture to process.
return False
if run.config and not run.image:
# Still waiting for the run capture to process.
return False
logging.info('Release done processing, now reviewing: build_id=%r, '
'name=%r, number=%d', release.build_id, release.name,
release.number)
# Send the email at the end of this request so we know it's only
# sent a single time (guarded by the release.status check above).
build_id = release.build_id
release_name = release.name
release_number = release.number
@utils.after_this_request
def send_notification_email(response):
emails.send_ready_for_review(build_id, release_name, release_number)
release.status = models.Release.REVIEWING
db.session.add(release)
return True | python | def _check_release_done_processing(release):
"""Moves a release candidate to reviewing if all runs are done."""
if release.status != models.Release.PROCESSING:
# NOTE: This statement also guards for situations where the user has
# prematurely specified that the release is good or bad. Once the user
# has done that, the system will not automatically move the release
# back into the 'reviewing' state or send the email notification below.
logging.info('Release not in processing state yet: build_id=%r, '
'name=%r, number=%d', release.build_id, release.name,
release.number)
return False
query = models.Run.query.filter_by(release_id=release.id)
for run in query:
if run.status == models.Run.NEEDS_DIFF:
# Still waiting for the diff to finish.
return False
if run.ref_config and not run.ref_image:
# Still waiting for the ref capture to process.
return False
if run.config and not run.image:
# Still waiting for the run capture to process.
return False
logging.info('Release done processing, now reviewing: build_id=%r, '
'name=%r, number=%d', release.build_id, release.name,
release.number)
# Send the email at the end of this request so we know it's only
# sent a single time (guarded by the release.status check above).
build_id = release.build_id
release_name = release.name
release_number = release.number
@utils.after_this_request
def send_notification_email(response):
emails.send_ready_for_review(build_id, release_name, release_number)
release.status = models.Release.REVIEWING
db.session.add(release)
return True | [
"def",
"_check_release_done_processing",
"(",
"release",
")",
":",
"if",
"release",
".",
"status",
"!=",
"models",
".",
"Release",
".",
"PROCESSING",
":",
"# NOTE: This statement also guards for situations where the user has",
"# prematurely specified that the release is good or bad. Once the user",
"# has done that, the system will not automatically move the release",
"# back into the 'reviewing' state or send the email notification below.",
"logging",
".",
"info",
"(",
"'Release not in processing state yet: build_id=%r, '",
"'name=%r, number=%d'",
",",
"release",
".",
"build_id",
",",
"release",
".",
"name",
",",
"release",
".",
"number",
")",
"return",
"False",
"query",
"=",
"models",
".",
"Run",
".",
"query",
".",
"filter_by",
"(",
"release_id",
"=",
"release",
".",
"id",
")",
"for",
"run",
"in",
"query",
":",
"if",
"run",
".",
"status",
"==",
"models",
".",
"Run",
".",
"NEEDS_DIFF",
":",
"# Still waiting for the diff to finish.",
"return",
"False",
"if",
"run",
".",
"ref_config",
"and",
"not",
"run",
".",
"ref_image",
":",
"# Still waiting for the ref capture to process.",
"return",
"False",
"if",
"run",
".",
"config",
"and",
"not",
"run",
".",
"image",
":",
"# Still waiting for the run capture to process.",
"return",
"False",
"logging",
".",
"info",
"(",
"'Release done processing, now reviewing: build_id=%r, '",
"'name=%r, number=%d'",
",",
"release",
".",
"build_id",
",",
"release",
".",
"name",
",",
"release",
".",
"number",
")",
"# Send the email at the end of this request so we know it's only",
"# sent a single time (guarded by the release.status check above).",
"build_id",
"=",
"release",
".",
"build_id",
"release_name",
"=",
"release",
".",
"name",
"release_number",
"=",
"release",
".",
"number",
"@",
"utils",
".",
"after_this_request",
"def",
"send_notification_email",
"(",
"response",
")",
":",
"emails",
".",
"send_ready_for_review",
"(",
"build_id",
",",
"release_name",
",",
"release_number",
")",
"release",
".",
"status",
"=",
"models",
".",
"Release",
".",
"REVIEWING",
"db",
".",
"session",
".",
"add",
"(",
"release",
")",
"return",
"True"
]
| Moves a release candidate to reviewing if all runs are done. | [
"Moves",
"a",
"release",
"candidate",
"to",
"reviewing",
"if",
"all",
"runs",
"are",
"done",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/api.py#L157-L197 | train |
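A condensed restatement of the per-run test above as a standalone predicate; a sketch, not a drop-in replacement, since the real helper also flips status and schedules the notification email.

def release_is_settled(runs):
    # True once no run still needs a diff and every configured capture
    # (ref or primary) has produced its image.
    return all(
        run.status != models.Run.NEEDS_DIFF
        and (not run.ref_config or run.ref_image)
        and (not run.config or run.image)
        for run in runs)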
bslatkin/dpxdt | dpxdt/server/api.py | _get_release_params | def _get_release_params():
"""Gets the release params from the current request."""
release_name = request.form.get('release_name')
utils.jsonify_assert(release_name, 'release_name required')
release_number = request.form.get('release_number', type=int)
utils.jsonify_assert(release_number is not None, 'release_number required')
return release_name, release_number | python | def _get_release_params():
"""Gets the release params from the current request."""
release_name = request.form.get('release_name')
utils.jsonify_assert(release_name, 'release_name required')
release_number = request.form.get('release_number', type=int)
utils.jsonify_assert(release_number is not None, 'release_number required')
return release_name, release_number | [
"def",
"_get_release_params",
"(",
")",
":",
"release_name",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'release_name'",
")",
"utils",
".",
"jsonify_assert",
"(",
"release_name",
",",
"'release_name required'",
")",
"release_number",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'release_number'",
",",
"type",
"=",
"int",
")",
"utils",
".",
"jsonify_assert",
"(",
"release_number",
"is",
"not",
"None",
",",
"'release_number required'",
")",
"return",
"release_name",
",",
"release_number"
]
| Gets the release params from the current request. | [
"Gets",
"the",
"release",
"params",
"from",
"the",
"current",
"request",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/api.py#L200-L206 | train |
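
The same required-parameter idiom, sketched outside Flask with a plain dict
standing in for request.form (jsonify_assert is approximated by a hypothetical
helper that raises instead of returning a JSON error body)::

    def jsonify_assert(condition, message):
        # Stand-in: dpxdt's real helper aborts the request with a JSON error.
        if not condition:
            raise ValueError(message)

    def get_release_params(form):
        release_name = form.get('release_name')
        jsonify_assert(release_name, 'release_name required')
        try:
            release_number = int(form['release_number'])
        except (KeyError, ValueError):
            release_number = None
        jsonify_assert(release_number is not None, 'release_number required')
        return release_name, release_number

    assert get_release_params(
        {'release_name': 'r1', 'release_number': '3'}) == ('r1', 3)
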
bslatkin/dpxdt | dpxdt/server/api.py | _find_last_good_run | def _find_last_good_run(build):
"""Finds the last good release and run for a build."""
run_name = request.form.get('run_name', type=str)
utils.jsonify_assert(run_name, 'run_name required')
last_good_release = (
models.Release.query
.filter_by(
build_id=build.id,
status=models.Release.GOOD)
.order_by(models.Release.created.desc())
.first())
last_good_run = None
if last_good_release:
logging.debug('Found last good release for: build_id=%r, '
'release_name=%r, release_number=%d',
build.id, last_good_release.name,
last_good_release.number)
last_good_run = (
models.Run.query
.filter_by(release_id=last_good_release.id, name=run_name)
.first())
if last_good_run:
logging.debug('Found last good run for: build_id=%r, '
'release_name=%r, release_number=%d, '
'run_name=%r',
build.id, last_good_release.name,
last_good_release.number, last_good_run.name)
return last_good_release, last_good_run | python | def _find_last_good_run(build):
"""Finds the last good release and run for a build."""
run_name = request.form.get('run_name', type=str)
utils.jsonify_assert(run_name, 'run_name required')
last_good_release = (
models.Release.query
.filter_by(
build_id=build.id,
status=models.Release.GOOD)
.order_by(models.Release.created.desc())
.first())
last_good_run = None
if last_good_release:
logging.debug('Found last good release for: build_id=%r, '
'release_name=%r, release_number=%d',
build.id, last_good_release.name,
last_good_release.number)
last_good_run = (
models.Run.query
.filter_by(release_id=last_good_release.id, name=run_name)
.first())
if last_good_run:
logging.debug('Found last good run for: build_id=%r, '
'release_name=%r, release_number=%d, '
'run_name=%r',
build.id, last_good_release.name,
last_good_release.number, last_good_run.name)
return last_good_release, last_good_run | [
"def",
"_find_last_good_run",
"(",
"build",
")",
":",
"run_name",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'run_name'",
",",
"type",
"=",
"str",
")",
"utils",
".",
"jsonify_assert",
"(",
"run_name",
",",
"'run_name required'",
")",
"last_good_release",
"=",
"(",
"models",
".",
"Release",
".",
"query",
".",
"filter_by",
"(",
"build_id",
"=",
"build",
".",
"id",
",",
"status",
"=",
"models",
".",
"Release",
".",
"GOOD",
")",
".",
"order_by",
"(",
"models",
".",
"Release",
".",
"created",
".",
"desc",
"(",
")",
")",
".",
"first",
"(",
")",
")",
"last_good_run",
"=",
"None",
"if",
"last_good_release",
":",
"logging",
".",
"debug",
"(",
"'Found last good release for: build_id=%r, '",
"'release_name=%r, release_number=%d'",
",",
"build",
".",
"id",
",",
"last_good_release",
".",
"name",
",",
"last_good_release",
".",
"number",
")",
"last_good_run",
"=",
"(",
"models",
".",
"Run",
".",
"query",
".",
"filter_by",
"(",
"release_id",
"=",
"last_good_release",
".",
"id",
",",
"name",
"=",
"run_name",
")",
".",
"first",
"(",
")",
")",
"if",
"last_good_run",
":",
"logging",
".",
"debug",
"(",
"'Found last good run for: build_id=%r, '",
"'release_name=%r, release_number=%d, '",
"'run_name=%r'",
",",
"build",
".",
"id",
",",
"last_good_release",
".",
"name",
",",
"last_good_release",
".",
"number",
",",
"last_good_run",
".",
"name",
")",
"return",
"last_good_release",
",",
"last_good_run"
]
| Finds the last good release and run for a build. | [
"Finds",
"the",
"last",
"good",
"release",
"and",
"run",
"for",
"a",
"build",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/api.py#L209-L240 | train |
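
Stripped of the ORM, the lookup is "newest GOOD release, then that release's
run with the requested name"; a list-based sketch with hypothetical plain
dicts in place of the models::

    def find_last_good_run(releases, run_name):
        good = [r for r in releases if r['status'] == 'good']
        if not good:
            return None, None
        last = max(good, key=lambda r: r['created'])
        run = next((x for x in last['runs'] if x['name'] == run_name), None)
        return last, run

    releases = [
        {'status': 'good', 'created': 1,
         'runs': [{'name': 'home', 'image': 'sha-a'}]},
        {'status': 'bad', 'created': 2, 'runs': []},
    ]
    rel, run = find_last_good_run(releases, 'home')
    assert rel['created'] == 1 and run['image'] == 'sha-a'
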
bslatkin/dpxdt | dpxdt/server/api.py | find_run | def find_run():
"""Finds the last good run of the given name for a release."""
build = g.build
last_good_release, last_good_run = _find_last_good_run(build)
if last_good_run:
return flask.jsonify(
success=True,
build_id=build.id,
release_name=last_good_release.name,
release_number=last_good_release.number,
run_name=last_good_run.name,
url=last_good_run.url,
image=last_good_run.image,
log=last_good_run.log,
config=last_good_run.config)
return utils.jsonify_error('Run not found') | python | def find_run():
"""Finds the last good run of the given name for a release."""
build = g.build
last_good_release, last_good_run = _find_last_good_run(build)
if last_good_run:
return flask.jsonify(
success=True,
build_id=build.id,
release_name=last_good_release.name,
release_number=last_good_release.number,
run_name=last_good_run.name,
url=last_good_run.url,
image=last_good_run.image,
log=last_good_run.log,
config=last_good_run.config)
return utils.jsonify_error('Run not found') | [
"def",
"find_run",
"(",
")",
":",
"build",
"=",
"g",
".",
"build",
"last_good_release",
",",
"last_good_run",
"=",
"_find_last_good_run",
"(",
"build",
")",
"if",
"last_good_run",
":",
"return",
"flask",
".",
"jsonify",
"(",
"success",
"=",
"True",
",",
"build_id",
"=",
"build",
".",
"id",
",",
"release_name",
"=",
"last_good_release",
".",
"name",
",",
"release_number",
"=",
"last_good_release",
".",
"number",
",",
"run_name",
"=",
"last_good_run",
".",
"name",
",",
"url",
"=",
"last_good_run",
".",
"url",
",",
"image",
"=",
"last_good_run",
".",
"image",
",",
"log",
"=",
"last_good_run",
".",
"log",
",",
"config",
"=",
"last_good_run",
".",
"config",
")",
"return",
"utils",
".",
"jsonify_error",
"(",
"'Run not found'",
")"
]
| Finds the last good run of the given name for a release. | [
"Finds",
"the",
"last",
"good",
"run",
"of",
"the",
"given",
"name",
"for",
"a",
"release",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/api.py#L245-L262 | train |
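
A client-side sketch of consuming this endpoint's JSON response (the host,
path, and authentication are hypothetical; only the form fields and response
keys come from the record)::

    import requests

    resp = requests.post(
        'https://dpxdt.example.com/api/find_run',   # hypothetical URL
        data={'build_id': 1, 'run_name': 'homepage'})
    payload = resp.json()
    if payload.get('success'):
        print(payload['release_name'], payload['release_number'],
              payload['image'])
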
bslatkin/dpxdt | dpxdt/server/api.py | _get_or_create_run | def _get_or_create_run(build):
"""Gets a run for a build or creates it if it does not exist."""
release_name, release_number = _get_release_params()
run_name = request.form.get('run_name', type=str)
utils.jsonify_assert(run_name, 'run_name required')
release = (
models.Release.query
.filter_by(build_id=build.id, name=release_name, number=release_number)
.first())
utils.jsonify_assert(release, 'release does not exist')
run = (
models.Run.query
.filter_by(release_id=release.id, name=run_name)
.first())
if not run:
# Ignore re-reports of the same run name for this release.
logging.info('Created run: build_id=%r, release_name=%r, '
'release_number=%d, run_name=%r',
build.id, release.name, release.number, run_name)
run = models.Run(
release_id=release.id,
name=run_name,
status=models.Run.DATA_PENDING)
db.session.add(run)
db.session.flush()
return release, run | python | def _get_or_create_run(build):
"""Gets a run for a build or creates it if it does not exist."""
release_name, release_number = _get_release_params()
run_name = request.form.get('run_name', type=str)
utils.jsonify_assert(run_name, 'run_name required')
release = (
models.Release.query
.filter_by(build_id=build.id, name=release_name, number=release_number)
.first())
utils.jsonify_assert(release, 'release does not exist')
run = (
models.Run.query
.filter_by(release_id=release.id, name=run_name)
.first())
if not run:
# Ignore re-reports of the same run name for this release.
logging.info('Created run: build_id=%r, release_name=%r, '
'release_number=%d, run_name=%r',
build.id, release.name, release.number, run_name)
run = models.Run(
release_id=release.id,
name=run_name,
status=models.Run.DATA_PENDING)
db.session.add(run)
db.session.flush()
return release, run | [
"def",
"_get_or_create_run",
"(",
"build",
")",
":",
"release_name",
",",
"release_number",
"=",
"_get_release_params",
"(",
")",
"run_name",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'run_name'",
",",
"type",
"=",
"str",
")",
"utils",
".",
"jsonify_assert",
"(",
"run_name",
",",
"'run_name required'",
")",
"release",
"=",
"(",
"models",
".",
"Release",
".",
"query",
".",
"filter_by",
"(",
"build_id",
"=",
"build",
".",
"id",
",",
"name",
"=",
"release_name",
",",
"number",
"=",
"release_number",
")",
".",
"first",
"(",
")",
")",
"utils",
".",
"jsonify_assert",
"(",
"release",
",",
"'release does not exist'",
")",
"run",
"=",
"(",
"models",
".",
"Run",
".",
"query",
".",
"filter_by",
"(",
"release_id",
"=",
"release",
".",
"id",
",",
"name",
"=",
"run_name",
")",
".",
"first",
"(",
")",
")",
"if",
"not",
"run",
":",
"# Ignore re-reports of the same run name for this release.",
"logging",
".",
"info",
"(",
"'Created run: build_id=%r, release_name=%r, '",
"'release_number=%d, run_name=%r'",
",",
"build",
".",
"id",
",",
"release",
".",
"name",
",",
"release",
".",
"number",
",",
"run_name",
")",
"run",
"=",
"models",
".",
"Run",
"(",
"release_id",
"=",
"release",
".",
"id",
",",
"name",
"=",
"run_name",
",",
"status",
"=",
"models",
".",
"Run",
".",
"DATA_PENDING",
")",
"db",
".",
"session",
".",
"add",
"(",
"run",
")",
"db",
".",
"session",
".",
"flush",
"(",
")",
"return",
"release",
",",
"run"
]
| Gets a run for a build or creates it if it does not exist. | [
"Gets",
"a",
"run",
"for",
"a",
"build",
"or",
"creates",
"it",
"if",
"it",
"does",
"not",
"exist",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/api.py#L265-L293 | train |
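
The get-or-create idiom is what makes run reporting idempotent: a repeated
report of the same run name becomes a no-op. A dict-backed sketch without the
database::

    def get_or_create_run(runs_by_key, release_id, run_name):
        key = (release_id, run_name)
        run = runs_by_key.get(key)
        if run is None:
            run = {'release_id': release_id, 'name': run_name,
                   'status': 'data_pending'}
            runs_by_key[key] = run    # first report creates; later ones reuse
        return run

    runs = {}
    first = get_or_create_run(runs, release_id=5, run_name='home')
    again = get_or_create_run(runs, release_id=5, run_name='home')
    assert first is again and len(runs) == 1
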
bslatkin/dpxdt | dpxdt/server/api.py | _enqueue_capture | def _enqueue_capture(build, release, run, url, config_data, baseline=False):
"""Enqueues a task to run a capture process."""
# Validate the JSON config parses.
try:
config_dict = json.loads(config_data)
except Exception as e:
abort(utils.jsonify_error(e))
# Rewrite the config JSON to include the URL specified in this request.
# Blindly overwrite anything that was there.
config_dict['targetUrl'] = url
config_data = json.dumps(config_dict)
config_artifact = _save_artifact(build, config_data, 'application/json')
db.session.add(config_artifact)
db.session.flush()
suffix = ''
if baseline:
suffix = ':baseline'
task_id = '%s:%s%s' % (run.id, hashlib.sha1(url).hexdigest(), suffix)
logging.info('Enqueueing capture task=%r, baseline=%r', task_id, baseline)
work_queue.add(
constants.CAPTURE_QUEUE_NAME,
payload=dict(
build_id=build.id,
release_name=release.name,
release_number=release.number,
run_name=run.name,
url=url,
config_sha1sum=config_artifact.id,
baseline=baseline,
),
build_id=build.id,
release_id=release.id,
run_id=run.id,
source='request_run',
task_id=task_id)
# Set the URL and config early to indicate to report_run that there is
# still data pending even if 'image' and 'ref_image' are unset.
if baseline:
run.ref_url = url
run.ref_config = config_artifact.id
else:
run.url = url
run.config = config_artifact.id | python | def _enqueue_capture(build, release, run, url, config_data, baseline=False):
"""Enqueues a task to run a capture process."""
# Validate the JSON config parses.
try:
config_dict = json.loads(config_data)
except Exception as e:
abort(utils.jsonify_error(e))
# Rewrite the config JSON to include the URL specified in this request.
# Blindly overwrite anything that was there.
config_dict['targetUrl'] = url
config_data = json.dumps(config_dict)
config_artifact = _save_artifact(build, config_data, 'application/json')
db.session.add(config_artifact)
db.session.flush()
suffix = ''
if baseline:
suffix = ':baseline'
task_id = '%s:%s%s' % (run.id, hashlib.sha1(url).hexdigest(), suffix)
logging.info('Enqueueing capture task=%r, baseline=%r', task_id, baseline)
work_queue.add(
constants.CAPTURE_QUEUE_NAME,
payload=dict(
build_id=build.id,
release_name=release.name,
release_number=release.number,
run_name=run.name,
url=url,
config_sha1sum=config_artifact.id,
baseline=baseline,
),
build_id=build.id,
release_id=release.id,
run_id=run.id,
source='request_run',
task_id=task_id)
# Set the URL and config early to indicate to report_run that there is
# still data pending even if 'image' and 'ref_image' are unset.
if baseline:
run.ref_url = url
run.ref_config = config_artifact.id
else:
run.url = url
run.config = config_artifact.id | [
"def",
"_enqueue_capture",
"(",
"build",
",",
"release",
",",
"run",
",",
"url",
",",
"config_data",
",",
"baseline",
"=",
"False",
")",
":",
"# Validate the JSON config parses.",
"try",
":",
"config_dict",
"=",
"json",
".",
"loads",
"(",
"config_data",
")",
"except",
"Exception",
",",
"e",
":",
"abort",
"(",
"utils",
".",
"jsonify_error",
"(",
"e",
")",
")",
"# Rewrite the config JSON to include the URL specified in this request.",
"# Blindly overwrite anything that was there.",
"config_dict",
"[",
"'targetUrl'",
"]",
"=",
"url",
"config_data",
"=",
"json",
".",
"dumps",
"(",
"config_dict",
")",
"config_artifact",
"=",
"_save_artifact",
"(",
"build",
",",
"config_data",
",",
"'application/json'",
")",
"db",
".",
"session",
".",
"add",
"(",
"config_artifact",
")",
"db",
".",
"session",
".",
"flush",
"(",
")",
"suffix",
"=",
"''",
"if",
"baseline",
":",
"suffix",
"=",
"':baseline'",
"task_id",
"=",
"'%s:%s%s'",
"%",
"(",
"run",
".",
"id",
",",
"hashlib",
".",
"sha1",
"(",
"url",
")",
".",
"hexdigest",
"(",
")",
",",
"suffix",
")",
"logging",
".",
"info",
"(",
"'Enqueueing capture task=%r, baseline=%r'",
",",
"task_id",
",",
"baseline",
")",
"work_queue",
".",
"add",
"(",
"constants",
".",
"CAPTURE_QUEUE_NAME",
",",
"payload",
"=",
"dict",
"(",
"build_id",
"=",
"build",
".",
"id",
",",
"release_name",
"=",
"release",
".",
"name",
",",
"release_number",
"=",
"release",
".",
"number",
",",
"run_name",
"=",
"run",
".",
"name",
",",
"url",
"=",
"url",
",",
"config_sha1sum",
"=",
"config_artifact",
".",
"id",
",",
"baseline",
"=",
"baseline",
",",
")",
",",
"build_id",
"=",
"build",
".",
"id",
",",
"release_id",
"=",
"release",
".",
"id",
",",
"run_id",
"=",
"run",
".",
"id",
",",
"source",
"=",
"'request_run'",
",",
"task_id",
"=",
"task_id",
")",
"# Set the URL and config early to indicate to report_run that there is",
"# still data pending even if 'image' and 'ref_image' are unset.",
"if",
"baseline",
":",
"run",
".",
"ref_url",
"=",
"url",
"run",
".",
"ref_config",
"=",
"config_artifact",
".",
"id",
"else",
":",
"run",
".",
"url",
"=",
"url",
"run",
".",
"config",
"=",
"config_artifact",
".",
"id"
]
| Enqueues a task to run a capture process. | [
"Enqueues",
"a",
"task",
"to",
"run",
"a",
"capture",
"process",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/api.py#L296-L344 | train |
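
Task ids combine the run id, a SHA-1 of the target URL, and a ':baseline'
suffix, so a run's baseline and candidate captures enqueue as distinct,
deduplicatable tasks. The derivation in isolation (Python 3 needs the encode
that the Python 2 record omits)::

    import hashlib

    def capture_task_id(run_id, url, baseline=False):
        suffix = ':baseline' if baseline else ''
        digest = hashlib.sha1(url.encode('utf-8')).hexdigest()
        return '%s:%s%s' % (run_id, digest, suffix)

    assert (capture_task_id(7, 'http://example.com') !=
            capture_task_id(7, 'http://example.com', baseline=True))
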
bslatkin/dpxdt | dpxdt/server/api.py | request_run | def request_run():
"""Requests a new run for a release candidate."""
build = g.build
current_release, current_run = _get_or_create_run(build)
current_url = request.form.get('url', type=str)
config_data = request.form.get('config', default='{}', type=str)
utils.jsonify_assert(current_url, 'url to capture required')
utils.jsonify_assert(config_data, 'config document required')
config_artifact = _enqueue_capture(
build, current_release, current_run, current_url, config_data)
ref_url = request.form.get('ref_url', type=str)
ref_config_data = request.form.get('ref_config', type=str)
utils.jsonify_assert(
bool(ref_url) == bool(ref_config_data),
'ref_url and ref_config must both be specified or not specified')
if ref_url and ref_config_data:
ref_config_artifact = _enqueue_capture(
build, current_release, current_run, ref_url, ref_config_data,
baseline=True)
else:
_, last_good_run = _find_last_good_run(build)
if last_good_run:
current_run.ref_url = last_good_run.url
current_run.ref_image = last_good_run.image
current_run.ref_log = last_good_run.log
current_run.ref_config = last_good_run.config
db.session.add(current_run)
db.session.commit()
signals.run_updated_via_api.send(
app, build=build, release=current_release, run=current_run)
return flask.jsonify(
success=True,
build_id=build.id,
release_name=current_release.name,
release_number=current_release.number,
run_name=current_run.name,
url=current_run.url,
config=current_run.config,
ref_url=current_run.ref_url,
ref_config=current_run.ref_config) | python | def request_run():
"""Requests a new run for a release candidate."""
build = g.build
current_release, current_run = _get_or_create_run(build)
current_url = request.form.get('url', type=str)
config_data = request.form.get('config', default='{}', type=str)
utils.jsonify_assert(current_url, 'url to capture required')
utils.jsonify_assert(config_data, 'config document required')
config_artifact = _enqueue_capture(
build, current_release, current_run, current_url, config_data)
ref_url = request.form.get('ref_url', type=str)
ref_config_data = request.form.get('ref_config', type=str)
utils.jsonify_assert(
bool(ref_url) == bool(ref_config_data),
'ref_url and ref_config must both be specified or not specified')
if ref_url and ref_config_data:
ref_config_artifact = _enqueue_capture(
build, current_release, current_run, ref_url, ref_config_data,
baseline=True)
else:
_, last_good_run = _find_last_good_run(build)
if last_good_run:
current_run.ref_url = last_good_run.url
current_run.ref_image = last_good_run.image
current_run.ref_log = last_good_run.log
current_run.ref_config = last_good_run.config
db.session.add(current_run)
db.session.commit()
signals.run_updated_via_api.send(
app, build=build, release=current_release, run=current_run)
return flask.jsonify(
success=True,
build_id=build.id,
release_name=current_release.name,
release_number=current_release.number,
run_name=current_run.name,
url=current_run.url,
config=current_run.config,
ref_url=current_run.ref_url,
ref_config=current_run.ref_config) | [
"def",
"request_run",
"(",
")",
":",
"build",
"=",
"g",
".",
"build",
"current_release",
",",
"current_run",
"=",
"_get_or_create_run",
"(",
"build",
")",
"current_url",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'url'",
",",
"type",
"=",
"str",
")",
"config_data",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'config'",
",",
"default",
"=",
"'{}'",
",",
"type",
"=",
"str",
")",
"utils",
".",
"jsonify_assert",
"(",
"current_url",
",",
"'url to capture required'",
")",
"utils",
".",
"jsonify_assert",
"(",
"config_data",
",",
"'config document required'",
")",
"config_artifact",
"=",
"_enqueue_capture",
"(",
"build",
",",
"current_release",
",",
"current_run",
",",
"current_url",
",",
"config_data",
")",
"ref_url",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'ref_url'",
",",
"type",
"=",
"str",
")",
"ref_config_data",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'ref_config'",
",",
"type",
"=",
"str",
")",
"utils",
".",
"jsonify_assert",
"(",
"bool",
"(",
"ref_url",
")",
"==",
"bool",
"(",
"ref_config_data",
")",
",",
"'ref_url and ref_config must both be specified or not specified'",
")",
"if",
"ref_url",
"and",
"ref_config_data",
":",
"ref_config_artifact",
"=",
"_enqueue_capture",
"(",
"build",
",",
"current_release",
",",
"current_run",
",",
"ref_url",
",",
"ref_config_data",
",",
"baseline",
"=",
"True",
")",
"else",
":",
"_",
",",
"last_good_run",
"=",
"_find_last_good_run",
"(",
"build",
")",
"if",
"last_good_run",
":",
"current_run",
".",
"ref_url",
"=",
"last_good_run",
".",
"url",
"current_run",
".",
"ref_image",
"=",
"last_good_run",
".",
"image",
"current_run",
".",
"ref_log",
"=",
"last_good_run",
".",
"log",
"current_run",
".",
"ref_config",
"=",
"last_good_run",
".",
"config",
"db",
".",
"session",
".",
"add",
"(",
"current_run",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"signals",
".",
"run_updated_via_api",
".",
"send",
"(",
"app",
",",
"build",
"=",
"build",
",",
"release",
"=",
"current_release",
",",
"run",
"=",
"current_run",
")",
"return",
"flask",
".",
"jsonify",
"(",
"success",
"=",
"True",
",",
"build_id",
"=",
"build",
".",
"id",
",",
"release_name",
"=",
"current_release",
".",
"name",
",",
"release_number",
"=",
"current_release",
".",
"number",
",",
"run_name",
"=",
"current_run",
".",
"name",
",",
"url",
"=",
"current_run",
".",
"url",
",",
"config",
"=",
"current_run",
".",
"config",
",",
"ref_url",
"=",
"current_run",
".",
"ref_url",
",",
"ref_config",
"=",
"current_run",
".",
"ref_config",
")"
]
| Requests a new run for a release candidate. | [
"Requests",
"a",
"new",
"run",
"for",
"a",
"release",
"candidate",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/api.py#L350-L396 | train |
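
The ref_url/ref_config guard is an equality of truthiness: the two fields must
be supplied together or omitted together. The check in isolation::

    def refs_consistent(ref_url, ref_config):
        return bool(ref_url) == bool(ref_config)

    assert refs_consistent(None, None)          # neither: fall back to last good run
    assert refs_consistent('http://a', '{}')    # both: capture an explicit baseline
    assert not refs_consistent('http://a', None)
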
bslatkin/dpxdt | dpxdt/server/api.py | runs_done | def runs_done():
"""Marks a release candidate as having all runs reported."""
build = g.build
release_name, release_number = _get_release_params()
release = (
models.Release.query
.filter_by(build_id=build.id, name=release_name, number=release_number)
.with_lockmode('update')
.first())
utils.jsonify_assert(release, 'Release does not exist')
release.status = models.Release.PROCESSING
db.session.add(release)
_check_release_done_processing(release)
db.session.commit()
signals.release_updated_via_api.send(app, build=build, release=release)
logging.info('Runs done for release: build_id=%r, release_name=%r, '
'release_number=%d', build.id, release.name, release.number)
results_url = url_for(
'view_release',
id=build.id,
name=release.name,
number=release.number,
_external=True)
return flask.jsonify(
success=True,
results_url=results_url) | python | def runs_done():
"""Marks a release candidate as having all runs reported."""
build = g.build
release_name, release_number = _get_release_params()
release = (
models.Release.query
.filter_by(build_id=build.id, name=release_name, number=release_number)
.with_lockmode('update')
.first())
utils.jsonify_assert(release, 'Release does not exist')
release.status = models.Release.PROCESSING
db.session.add(release)
_check_release_done_processing(release)
db.session.commit()
signals.release_updated_via_api.send(app, build=build, release=release)
logging.info('Runs done for release: build_id=%r, release_name=%r, '
'release_number=%d', build.id, release.name, release.number)
results_url = url_for(
'view_release',
id=build.id,
name=release.name,
number=release.number,
_external=True)
return flask.jsonify(
success=True,
results_url=results_url) | [
"def",
"runs_done",
"(",
")",
":",
"build",
"=",
"g",
".",
"build",
"release_name",
",",
"release_number",
"=",
"_get_release_params",
"(",
")",
"release",
"=",
"(",
"models",
".",
"Release",
".",
"query",
".",
"filter_by",
"(",
"build_id",
"=",
"build",
".",
"id",
",",
"name",
"=",
"release_name",
",",
"number",
"=",
"release_number",
")",
".",
"with_lockmode",
"(",
"'update'",
")",
".",
"first",
"(",
")",
")",
"utils",
".",
"jsonify_assert",
"(",
"release",
",",
"'Release does not exist'",
")",
"release",
".",
"status",
"=",
"models",
".",
"Release",
".",
"PROCESSING",
"db",
".",
"session",
".",
"add",
"(",
"release",
")",
"_check_release_done_processing",
"(",
"release",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"signals",
".",
"release_updated_via_api",
".",
"send",
"(",
"app",
",",
"build",
"=",
"build",
",",
"release",
"=",
"release",
")",
"logging",
".",
"info",
"(",
"'Runs done for release: build_id=%r, release_name=%r, '",
"'release_number=%d'",
",",
"build",
".",
"id",
",",
"release",
".",
"name",
",",
"release",
".",
"number",
")",
"results_url",
"=",
"url_for",
"(",
"'view_release'",
",",
"id",
"=",
"build",
".",
"id",
",",
"name",
"=",
"release",
".",
"name",
",",
"number",
"=",
"release",
".",
"number",
",",
"_external",
"=",
"True",
")",
"return",
"flask",
".",
"jsonify",
"(",
"success",
"=",
"True",
",",
"results_url",
"=",
"results_url",
")"
]
| Marks a release candidate as having all runs reported. | [
"Marks",
"a",
"release",
"candidate",
"as",
"having",
"all",
"runs",
"reported",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/api.py#L531-L562 | train |
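
The with_lockmode('update') call (SELECT ... FOR UPDATE; spelled
with_for_update() in later SQLAlchemy) keeps the status flip and the
done-processing check atomic under a row lock. The same check-then-set
discipline, sketched with an in-process lock::

    import threading

    _release_lock = threading.Lock()

    def runs_done(release, check_done):
        with _release_lock:            # stands in for the database row lock
            release['status'] = 'processing'
            check_done(release)        # may advance it to 'reviewing'
        return release['status']

    state = {'status': 'receiving'}
    assert runs_done(state, lambda r: r.update(status='reviewing')) == 'reviewing'
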
bslatkin/dpxdt | dpxdt/server/api.py | _save_artifact | def _save_artifact(build, data, content_type):
"""Saves an artifact to the DB and returns it."""
sha1sum = hashlib.sha1(data).hexdigest()
artifact = models.Artifact.query.filter_by(id=sha1sum).first()
if artifact:
logging.debug('Upload already exists: artifact_id=%r', sha1sum)
else:
logging.info('Upload received: artifact_id=%r, content_type=%r',
sha1sum, content_type)
artifact = models.Artifact(
id=sha1sum,
content_type=content_type,
data=data)
_artifact_created(artifact)
artifact.owners.append(build)
return artifact | python | def _save_artifact(build, data, content_type):
"""Saves an artifact to the DB and returns it."""
sha1sum = hashlib.sha1(data).hexdigest()
artifact = models.Artifact.query.filter_by(id=sha1sum).first()
if artifact:
logging.debug('Upload already exists: artifact_id=%r', sha1sum)
else:
logging.info('Upload received: artifact_id=%r, content_type=%r',
sha1sum, content_type)
artifact = models.Artifact(
id=sha1sum,
content_type=content_type,
data=data)
_artifact_created(artifact)
artifact.owners.append(build)
return artifact | [
"def",
"_save_artifact",
"(",
"build",
",",
"data",
",",
"content_type",
")",
":",
"sha1sum",
"=",
"hashlib",
".",
"sha1",
"(",
"data",
")",
".",
"hexdigest",
"(",
")",
"artifact",
"=",
"models",
".",
"Artifact",
".",
"query",
".",
"filter_by",
"(",
"id",
"=",
"sha1sum",
")",
".",
"first",
"(",
")",
"if",
"artifact",
":",
"logging",
".",
"debug",
"(",
"'Upload already exists: artifact_id=%r'",
",",
"sha1sum",
")",
"else",
":",
"logging",
".",
"info",
"(",
"'Upload received: artifact_id=%r, content_type=%r'",
",",
"sha1sum",
",",
"content_type",
")",
"artifact",
"=",
"models",
".",
"Artifact",
"(",
"id",
"=",
"sha1sum",
",",
"content_type",
"=",
"content_type",
",",
"data",
"=",
"data",
")",
"_artifact_created",
"(",
"artifact",
")",
"artifact",
".",
"owners",
".",
"append",
"(",
"build",
")",
"return",
"artifact"
]
| Saves an artifact to the DB and returns it. | [
"Saves",
"an",
"artifact",
"to",
"the",
"DB",
"and",
"returns",
"it",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/api.py#L575-L592 | train |
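
Artifacts are content-addressed: the SHA-1 of the bytes is the primary key, so
identical uploads deduplicate to a single row and only pick up an extra owner.
A dict-backed sketch of the dedup::

    import hashlib

    store = {}

    def save_artifact(data, content_type):
        key = hashlib.sha1(data).hexdigest()
        if key not in store:              # first upload wins; re-uploads no-op
            store[key] = (content_type, data)
        return key

    a = save_artifact(b'png-bytes', 'image/png')
    b = save_artifact(b'png-bytes', 'image/png')
    assert a == b and len(store) == 1
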
bslatkin/dpxdt | dpxdt/server/api.py | upload | def upload():
"""Uploads an artifact referenced by a run."""
build = g.build
utils.jsonify_assert(len(request.files) == 1,
'Need exactly one uploaded file')
file_storage = request.files.values()[0]
data = file_storage.read()
content_type, _ = mimetypes.guess_type(file_storage.filename)
artifact = _save_artifact(build, data, content_type)
db.session.add(artifact)
db.session.commit()
return flask.jsonify(
success=True,
build_id=build.id,
sha1sum=artifact.id,
content_type=content_type) | python | def upload():
"""Uploads an artifact referenced by a run."""
build = g.build
utils.jsonify_assert(len(request.files) == 1,
'Need exactly one uploaded file')
file_storage = request.files.values()[0]
data = file_storage.read()
content_type, _ = mimetypes.guess_type(file_storage.filename)
artifact = _save_artifact(build, data, content_type)
db.session.add(artifact)
db.session.commit()
return flask.jsonify(
success=True,
build_id=build.id,
sha1sum=artifact.id,
content_type=content_type) | [
"def",
"upload",
"(",
")",
":",
"build",
"=",
"g",
".",
"build",
"utils",
".",
"jsonify_assert",
"(",
"len",
"(",
"request",
".",
"files",
")",
"==",
"1",
",",
"'Need exactly one uploaded file'",
")",
"file_storage",
"=",
"request",
".",
"files",
".",
"values",
"(",
")",
"[",
"0",
"]",
"data",
"=",
"file_storage",
".",
"read",
"(",
")",
"content_type",
",",
"_",
"=",
"mimetypes",
".",
"guess_type",
"(",
"file_storage",
".",
"filename",
")",
"artifact",
"=",
"_save_artifact",
"(",
"build",
",",
"data",
",",
"content_type",
")",
"db",
".",
"session",
".",
"add",
"(",
"artifact",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"return",
"flask",
".",
"jsonify",
"(",
"success",
"=",
"True",
",",
"build_id",
"=",
"build",
".",
"id",
",",
"sha1sum",
"=",
"artifact",
".",
"id",
",",
"content_type",
"=",
"content_type",
")"
]
| Uploads an artifact referenced by a run. | [
"Uploads",
"an",
"artifact",
"referenced",
"by",
"a",
"run",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/api.py#L598-L617 | train |
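
Note the content type is inferred from the uploaded filename rather than
trusted from the client's headers; the stdlib call in isolation::

    import mimetypes

    content_type, _encoding = mimetypes.guess_type('capture.png')
    assert content_type == 'image/png'
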
bslatkin/dpxdt | dpxdt/server/api.py | _get_artifact_response | def _get_artifact_response(artifact):
"""Gets the response object for the given artifact.
This method may be overridden in environments that have a different way of
storing artifact files, such as on-disk or S3.
"""
response = flask.Response(
artifact.data,
mimetype=artifact.content_type)
response.cache_control.public = True
response.cache_control.max_age = 8640000
response.set_etag(artifact.id)
return response | python | def _get_artifact_response(artifact):
"""Gets the response object for the given artifact.
This method may be overridden in environments that have a different way of
storing artifact files, such as on-disk or S3.
"""
response = flask.Response(
artifact.data,
mimetype=artifact.content_type)
response.cache_control.public = True
response.cache_control.max_age = 8640000
response.set_etag(artifact.id)
return response | [
"def",
"_get_artifact_response",
"(",
"artifact",
")",
":",
"response",
"=",
"flask",
".",
"Response",
"(",
"artifact",
".",
"data",
",",
"mimetype",
"=",
"artifact",
".",
"content_type",
")",
"response",
".",
"cache_control",
".",
"public",
"=",
"True",
"response",
".",
"cache_control",
".",
"max_age",
"=",
"8640000",
"response",
".",
"set_etag",
"(",
"artifact",
".",
"id",
")",
"return",
"response"
]
| Gets the response object for the given artifact.
This method may be overridden in environments that have a different way of
storing artifact files, such as on-disk or S3. | [
"Gets",
"the",
"response",
"object",
"for",
"the",
"given",
"artifact",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/api.py#L620-L632 | train |
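
The aggressive caching is safe because a content-hashed artifact can never
change under its id; the max-age of 8640000 seconds works out to 100 days::

    SECONDS_PER_DAY = 24 * 60 * 60          # 86400
    assert 8640000 // SECONDS_PER_DAY == 100
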
bslatkin/dpxdt | dpxdt/server/api.py | download | def download():
"""Downloads an artifact by it's content hash."""
# Allow users with access to the build to download the file. Falls back
# to API keys with access to the build. Prefer user first for speed.
try:
build = auth.can_user_access_build('build_id')
except HTTPException:
logging.debug('User access to artifact failed. Trying API key.')
_, build = auth.can_api_key_access_build('build_id')
sha1sum = request.args.get('sha1sum', type=str)
if not sha1sum:
logging.debug('Artifact sha1sum=%r not supplied', sha1sum)
abort(404)
artifact = models.Artifact.query.get(sha1sum)
if not artifact:
logging.debug('Artifact sha1sum=%r does not exist', sha1sum)
abort(404)
build_id = request.args.get('build_id', type=int)
if not build_id:
logging.debug('build_id missing for artifact sha1sum=%r', sha1sum)
abort(404)
is_owned = artifact.owners.filter_by(id=build_id).first()
if not is_owned:
logging.debug('build_id=%r not owner of artifact sha1sum=%r',
build_id, sha1sum)
abort(403)
# Make sure there are no Set-Cookie headers on the response so this
# request is cacheable by all HTTP frontends.
@utils.after_this_request
def no_session(response):
if 'Set-Cookie' in response.headers:
del response.headers['Set-Cookie']
if not utils.is_production():
# Insert a sleep to emulate how the page loading looks in production.
time.sleep(1.5)
if request.if_none_match and request.if_none_match.contains(sha1sum):
response = flask.Response(status=304)
return response
return _get_artifact_response(artifact) | python | def download():
"""Downloads an artifact by it's content hash."""
# Allow users with access to the build to download the file. Falls back
# to API keys with access to the build. Prefer user first for speed.
try:
build = auth.can_user_access_build('build_id')
except HTTPException:
logging.debug('User access to artifact failed. Trying API key.')
_, build = auth.can_api_key_access_build('build_id')
sha1sum = request.args.get('sha1sum', type=str)
if not sha1sum:
logging.debug('Artifact sha1sum=%r not supplied', sha1sum)
abort(404)
artifact = models.Artifact.query.get(sha1sum)
if not artifact:
logging.debug('Artifact sha1sum=%r does not exist', sha1sum)
abort(404)
build_id = request.args.get('build_id', type=int)
if not build_id:
logging.debug('build_id missing for artifact sha1sum=%r', sha1sum)
abort(404)
is_owned = artifact.owners.filter_by(id=build_id).first()
if not is_owned:
logging.debug('build_id=%r not owner of artifact sha1sum=%r',
build_id, sha1sum)
abort(403)
# Make sure there are no Set-Cookie headers on the response so this
# request is cacheable by all HTTP frontends.
@utils.after_this_request
def no_session(response):
if 'Set-Cookie' in response.headers:
del response.headers['Set-Cookie']
if not utils.is_production():
# Insert a sleep to emulate how the page loading looks in production.
time.sleep(1.5)
if request.if_none_match and request.if_none_match.contains(sha1sum):
response = flask.Response(status=304)
return response
return _get_artifact_response(artifact) | [
"def",
"download",
"(",
")",
":",
"# Allow users with access to the build to download the file. Falls back",
"# to API keys with access to the build. Prefer user first for speed.",
"try",
":",
"build",
"=",
"auth",
".",
"can_user_access_build",
"(",
"'build_id'",
")",
"except",
"HTTPException",
":",
"logging",
".",
"debug",
"(",
"'User access to artifact failed. Trying API key.'",
")",
"_",
",",
"build",
"=",
"auth",
".",
"can_api_key_access_build",
"(",
"'build_id'",
")",
"sha1sum",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'sha1sum'",
",",
"type",
"=",
"str",
")",
"if",
"not",
"sha1sum",
":",
"logging",
".",
"debug",
"(",
"'Artifact sha1sum=%r not supplied'",
",",
"sha1sum",
")",
"abort",
"(",
"404",
")",
"artifact",
"=",
"models",
".",
"Artifact",
".",
"query",
".",
"get",
"(",
"sha1sum",
")",
"if",
"not",
"artifact",
":",
"logging",
".",
"debug",
"(",
"'Artifact sha1sum=%r does not exist'",
",",
"sha1sum",
")",
"abort",
"(",
"404",
")",
"build_id",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'build_id'",
",",
"type",
"=",
"int",
")",
"if",
"not",
"build_id",
":",
"logging",
".",
"debug",
"(",
"'build_id missing for artifact sha1sum=%r'",
",",
"sha1sum",
")",
"abort",
"(",
"404",
")",
"is_owned",
"=",
"artifact",
".",
"owners",
".",
"filter_by",
"(",
"id",
"=",
"build_id",
")",
".",
"first",
"(",
")",
"if",
"not",
"is_owned",
":",
"logging",
".",
"debug",
"(",
"'build_id=%r not owner of artifact sha1sum=%r'",
",",
"build_id",
",",
"sha1sum",
")",
"abort",
"(",
"403",
")",
"# Make sure there are no Set-Cookie headers on the response so this",
"# request is cachable by all HTTP frontends.",
"@",
"utils",
".",
"after_this_request",
"def",
"no_session",
"(",
"response",
")",
":",
"if",
"'Set-Cookie'",
"in",
"response",
".",
"headers",
":",
"del",
"response",
".",
"headers",
"[",
"'Set-Cookie'",
"]",
"if",
"not",
"utils",
".",
"is_production",
"(",
")",
":",
"# Insert a sleep to emulate how the page loading looks in production.",
"time",
".",
"sleep",
"(",
"1.5",
")",
"if",
"request",
".",
"if_none_match",
"and",
"request",
".",
"if_none_match",
".",
"contains",
"(",
"sha1sum",
")",
":",
"response",
"=",
"flask",
".",
"Response",
"(",
"status",
"=",
"304",
")",
"return",
"response",
"return",
"_get_artifact_response",
"(",
"artifact",
")"
]
| Downloads an artifact by its content hash. | [
"Downloads",
"an",
"artifact",
"by",
"it",
"s",
"content",
"hash",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/api.py#L636-L682 | train |
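
The handler pairs the content-hash ETag with If-None-Match revalidation: a
client that already holds the bytes gets an empty 304 instead of the body.
The conditional in isolation, using werkzeug's ETags type (which backs Flask's
request.if_none_match)::

    from werkzeug.datastructures import ETags

    def not_modified(if_none_match, sha1sum):
        return bool(if_none_match) and if_none_match.contains(sha1sum)

    assert not_modified(ETags(strong_etags=['abc123']), 'abc123')
    assert not not_modified(ETags(), 'abc123')
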
bslatkin/dpxdt | dpxdt/server/operations.py | BaseOps.evict | def evict(self):
"""Evict all caches related to these operations."""
logging.debug('Evicting cache for %r', self.cache_key)
_clear_version_cache(self.cache_key)
# Cause the cache key to be refreshed next time any operation is
# run to make sure we don't act on old cached data.
self.versioned_cache_key = None | python | def evict(self):
"""Evict all caches related to these operations."""
logging.debug('Evicting cache for %r', self.cache_key)
_clear_version_cache(self.cache_key)
# Cause the cache key to be refreshed next time any operation is
# run to make sure we don't act on old cached data.
self.versioned_cache_key = None | [
"def",
"evict",
"(",
"self",
")",
":",
"logging",
".",
"debug",
"(",
"'Evicting cache for %r'",
",",
"self",
".",
"cache_key",
")",
"_clear_version_cache",
"(",
"self",
".",
"cache_key",
")",
"# Cause the cache key to be refreshed next time any operation is",
"# run to make sure we don't act on old cached data.",
"self",
".",
"versioned_cache_key",
"=",
"None"
]
| Evict all caches related to these operations. | [
"Evict",
"all",
"caches",
"related",
"to",
"these",
"operations",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/operations.py#L72-L78 | train |
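
Setting versioned_cache_key to None after clearing the version is the usual
version-key eviction trick: derived entries embed the version, so dropping it
orphans them all at once instead of enumerating keys. A sketch with a
hypothetical in-memory cache::

    import uuid

    cache = {}

    def versioned_key(base_key):
        version = cache.get(('version', base_key))
        if version is None:
            version = uuid.uuid4().hex
            cache[('version', base_key)] = version
        return '%s@%s' % (base_key, version)

    def evict(base_key):
        cache.pop(('version', base_key), None)   # orphans every derived key

    k1 = versioned_key('build:1')
    evict('build:1')
    assert versioned_key('build:1') != k1
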
bslatkin/dpxdt | dpxdt/server/operations.py | BuildOps.sort_run | def sort_run(run):
"""Sort function for runs within a release."""
# Sort errors first, then by name. Also show errors that were manually
# approved, so the paging sort order stays the same even after users
# approve a diff on the run page.
if run.status in models.Run.DIFF_NEEDED_STATES:
return (0, run.name)
return (1, run.name) | python | def sort_run(run):
"""Sort function for runs within a release."""
# Sort errors first, then by name. Also show errors that were manually
# approved, so the paging sort order stays the same even after users
# approve a diff on the run page.
if run.status in models.Run.DIFF_NEEDED_STATES:
return (0, run.name)
return (1, run.name) | [
"def",
"sort_run",
"(",
"run",
")",
":",
"# Sort errors first, then by name. Also show errors that were manually",
"# approved, so the paging sort order stays the same even after users",
"# approve a diff on the run page.",
"if",
"run",
".",
"status",
"in",
"models",
".",
"Run",
".",
"DIFF_NEEDED_STATES",
":",
"return",
"(",
"0",
",",
"run",
".",
"name",
")",
"return",
"(",
"1",
",",
"run",
".",
"name",
")"
]
| Sort function for runs within a release. | [
"Sort",
"function",
"for",
"runs",
"within",
"a",
"release",
"."
]
| 9f860de1731021d99253670429e5f2157e1f6297 | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/operations.py#L170-L177 | train |
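
The (0, name) / (1, name) tuples give a stable two-level ordering: runs that
still need a diff sort first, and each group is alphabetical by name. In
isolation (the status values below are hypothetical stand-ins for
models.Run.DIFF_NEEDED_STATES)::

    DIFF_NEEDED_STATES = {'needs_diff', 'diff_not_found'}

    def sort_key(run):
        return (0 if run['status'] in DIFF_NEEDED_STATES else 1, run['name'])

    runs = [{'name': 'b', 'status': 'ok'},
            {'name': 'a', 'status': 'needs_diff'}]
    assert [r['name'] for r in sorted(runs, key=sort_key)] == ['a', 'b']
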
podio/valideer | valideer/base.py | parse | def parse(obj, required_properties=None, additional_properties=None,
ignore_optional_property_errors=None):
"""Try to parse the given ``obj`` as a validator instance.
:param obj: The object to be parsed. If it is a...:
- :py:class:`Validator` instance, return it.
- :py:class:`Validator` subclass, instantiate it without arguments and
return it.
- :py:attr:`~Validator.name` of a known :py:class:`Validator` subclass,
instantiate the subclass without arguments and return it.
- otherwise find the first registered :py:class:`Validator` factory that
can create it. The search order is the reverse of the factory registration
order. The caller is responsible for ensuring there are no ambiguous
values that can be parsed by more than one factory.
:param required_properties: Specifies for this parse call whether parsed
:py:class:`~valideer.validators.Object` properties are required or
optional by default. It can be:
- ``True`` for required.
- ``False`` for optional.
- ``None`` to use the value of the
:py:attr:`~valideer.validators.Object.REQUIRED_PROPERTIES` attribute.
:param additional_properties: Specifies for this parse call the schema of
all :py:class:`~valideer.validators.Object` properties that are not
explicitly defined as optional or required. It can also be:
- ``True`` to allow any value for additional properties.
- ``False`` to disallow any additional properties.
- :py:attr:`~valideer.validators.Object.REMOVE` to remove any additional
properties from the adapted object.
- ``None`` to use the value of the
:py:attr:`~valideer.validators.Object.ADDITIONAL_PROPERTIES` attribute.
:param ignore_optional_property_errors: Determines if invalid optional
properties are ignored:
- ``True`` to ignore invalid optional properties.
- ``False`` to raise ValidationError for invalid optional properties.
- ``None`` to use the value of the
:py:attr:`~valideer.validators.Object.IGNORE_OPTIONAL_PROPERTY_ERRORS`
attribute.
:raises SchemaError: If no appropriate validator could be found.
.. warning:: Passing ``required_properties`` and/or ``additional_properties``
with value other than ``None`` may be non intuitive for schemas that
involve nested validators. Take for example the following schema::
v = V.parse({
"x": "integer",
"child": V.Nullable({
"y": "integer"
})
}, required_properties=True)
Here the top-level properties 'x' and 'child' are required but the nested
'y' property is not. This is because by the time :py:meth:`parse` is called,
:py:class:`~valideer.validators.Nullable` has already parsed its argument
with the default value of ``required_properties``. Several other builtin
validators work similarly to :py:class:`~valideer.validators.Nullable`,
accepting one or more schemas to parse. In order to parse an arbitrarily
complex nested validator with the same value for ``required_properties``
and/or ``additional_properties``, use the :py:func:`parsing` context
manager instead::
with V.parsing(required_properties=True):
v = V.parse({
"x": "integer",
"child": V.Nullable({
"y": "integer"
})
})
"""
if not (required_properties is
additional_properties is
ignore_optional_property_errors is None):
with parsing(required_properties=required_properties,
additional_properties=additional_properties,
ignore_optional_property_errors=ignore_optional_property_errors):
return parse(obj)
validator = None
if isinstance(obj, Validator):
validator = obj
elif inspect.isclass(obj) and issubclass(obj, Validator):
validator = obj()
else:
try:
validator = _NAMED_VALIDATORS[obj]
except (KeyError, TypeError):
for factory in _VALIDATOR_FACTORIES:
validator = factory(obj)
if validator is not None:
break
else:
if inspect.isclass(validator) and issubclass(validator, Validator):
_NAMED_VALIDATORS[obj] = validator = validator()
if not isinstance(validator, Validator):
raise SchemaError("%r cannot be parsed as a Validator" % obj)
return validator | python | def parse(obj, required_properties=None, additional_properties=None,
ignore_optional_property_errors=None):
"""Try to parse the given ``obj`` as a validator instance.
:param obj: The object to be parsed. If it is a...:
- :py:class:`Validator` instance, return it.
- :py:class:`Validator` subclass, instantiate it without arguments and
return it.
- :py:attr:`~Validator.name` of a known :py:class:`Validator` subclass,
instantiate the subclass without arguments and return it.
- otherwise find the first registered :py:class:`Validator` factory that
can create it. The search order is the reverse of the factory registration
order. The caller is responsible for ensuring there are no ambiguous
values that can be parsed by more than one factory.
:param required_properties: Specifies for this parse call whether parsed
:py:class:`~valideer.validators.Object` properties are required or
optional by default. It can be:
- ``True`` for required.
- ``False`` for optional.
- ``None`` to use the value of the
:py:attr:`~valideer.validators.Object.REQUIRED_PROPERTIES` attribute.
:param additional_properties: Specifies for this parse call the schema of
all :py:class:`~valideer.validators.Object` properties that are not
explicitly defined as optional or required. It can also be:
- ``True`` to allow any value for additional properties.
- ``False`` to disallow any additional properties.
- :py:attr:`~valideer.validators.Object.REMOVE` to remove any additional
properties from the adapted object.
- ``None`` to use the value of the
:py:attr:`~valideer.validators.Object.ADDITIONAL_PROPERTIES` attribute.
:param ignore_optional_property_errors: Determines if invalid optional
properties are ignored:
- ``True`` to ignore invalid optional properties.
- ``False`` to raise ValidationError for invalid optional properties.
- ``None`` to use the value of the
:py:attr:`~valideer.validators.Object.IGNORE_OPTIONAL_PROPERTY_ERRORS`
attribute.
:raises SchemaError: If no appropriate validator could be found.
.. warning:: Passing ``required_properties`` and/or ``additional_properties``
with a value other than ``None`` may be non-intuitive for schemas that
involve nested validators. Take for example the following schema::
v = V.parse({
"x": "integer",
"child": V.Nullable({
"y": "integer"
})
}, required_properties=True)
Here the top-level properties 'x' and 'child' are required but the nested
'y' property is not. This is because by the time :py:meth:`parse` is called,
:py:class:`~valideer.validators.Nullable` has already parsed its argument
with the default value of ``required_properties``. Several other builtin
validators work similarly to :py:class:`~valideer.validators.Nullable`,
accepting one or more schemas to parse. In order to parse an arbitrarily
complex nested validator with the same value for ``required_properties``
and/or ``additional_properties``, use the :py:func:`parsing` context
manager instead::
with V.parsing(required_properties=True):
v = V.parse({
"x": "integer",
"child": V.Nullable({
"y": "integer"
})
})
"""
if not (required_properties is
additional_properties is
ignore_optional_property_errors is None):
with parsing(required_properties=required_properties,
additional_properties=additional_properties,
ignore_optional_property_errors=ignore_optional_property_errors):
return parse(obj)
validator = None
if isinstance(obj, Validator):
validator = obj
elif inspect.isclass(obj) and issubclass(obj, Validator):
validator = obj()
else:
try:
validator = _NAMED_VALIDATORS[obj]
except (KeyError, TypeError):
for factory in _VALIDATOR_FACTORIES:
validator = factory(obj)
if validator is not None:
break
else:
if inspect.isclass(validator) and issubclass(validator, Validator):
_NAMED_VALIDATORS[obj] = validator = validator()
if not isinstance(validator, Validator):
raise SchemaError("%r cannot be parsed as a Validator" % obj)
return validator | [
"def",
"parse",
"(",
"obj",
",",
"required_properties",
"=",
"None",
",",
"additional_properties",
"=",
"None",
",",
"ignore_optional_property_errors",
"=",
"None",
")",
":",
"if",
"not",
"(",
"required_properties",
"is",
"additional_properties",
"is",
"ignore_optional_property_errors",
"is",
"None",
")",
":",
"with",
"parsing",
"(",
"required_properties",
"=",
"required_properties",
",",
"additional_properties",
"=",
"additional_properties",
",",
"ignore_optional_property_errors",
"=",
"ignore_optional_property_errors",
")",
":",
"return",
"parse",
"(",
"obj",
")",
"validator",
"=",
"None",
"if",
"isinstance",
"(",
"obj",
",",
"Validator",
")",
":",
"validator",
"=",
"obj",
"elif",
"inspect",
".",
"isclass",
"(",
"obj",
")",
"and",
"issubclass",
"(",
"obj",
",",
"Validator",
")",
":",
"validator",
"=",
"obj",
"(",
")",
"else",
":",
"try",
":",
"validator",
"=",
"_NAMED_VALIDATORS",
"[",
"obj",
"]",
"except",
"(",
"KeyError",
",",
"TypeError",
")",
":",
"for",
"factory",
"in",
"_VALIDATOR_FACTORIES",
":",
"validator",
"=",
"factory",
"(",
"obj",
")",
"if",
"validator",
"is",
"not",
"None",
":",
"break",
"else",
":",
"if",
"inspect",
".",
"isclass",
"(",
"validator",
")",
"and",
"issubclass",
"(",
"validator",
",",
"Validator",
")",
":",
"_NAMED_VALIDATORS",
"[",
"obj",
"]",
"=",
"validator",
"=",
"validator",
"(",
")",
"if",
"not",
"isinstance",
"(",
"validator",
",",
"Validator",
")",
":",
"raise",
"SchemaError",
"(",
"\"%r cannot be parsed as a Validator\"",
"%",
"obj",
")",
"return",
"validator"
]
| Try to parse the given ``obj`` as a validator instance.
:param obj: The object to be parsed. If it is a...:
- :py:class:`Validator` instance, return it.
- :py:class:`Validator` subclass, instantiate it without arguments and
return it.
- :py:attr:`~Validator.name` of a known :py:class:`Validator` subclass,
instantiate the subclass without arguments and return it.
- otherwise find the first registered :py:class:`Validator` factory that
can create it. The search order is the reverse of the factory registration
order. The caller is responsible for ensuring there are no ambiguous
values that can be parsed by more than one factory.
:param required_properties: Specifies for this parse call whether parsed
:py:class:`~valideer.validators.Object` properties are required or
optional by default. It can be:
- ``True`` for required.
- ``False`` for optional.
- ``None`` to use the value of the
:py:attr:`~valideer.validators.Object.REQUIRED_PROPERTIES` attribute.
:param additional_properties: Specifies for this parse call the schema of
all :py:class:`~valideer.validators.Object` properties that are not
explicitly defined as optional or required. It can also be:
- ``True`` to allow any value for additional properties.
- ``False`` to disallow any additional properties.
- :py:attr:`~valideer.validators.Object.REMOVE` to remove any additional
properties from the adapted object.
- ``None`` to use the value of the
:py:attr:`~valideer.validators.Object.ADDITIONAL_PROPERTIES` attribute.
:param ignore_optional_property_errors: Determines if invalid optional
properties are ignored:
- ``True`` to ignore invalid optional properties.
- ``False`` to raise ValidationError for invalid optional properties.
- ``None`` to use the value of the
:py:attr:`~valideer.validators.Object.IGNORE_OPTIONAL_PROPERTY_ERRORS`
attribute.
:raises SchemaError: If no appropriate validator could be found.
.. warning:: Passing ``required_properties`` and/or ``additional_properties``
with a value other than ``None`` may be non-intuitive for schemas that
involve nested validators. Take for example the following schema::
v = V.parse({
"x": "integer",
"child": V.Nullable({
"y": "integer"
})
}, required_properties=True)
Here the top-level properties 'x' and 'child' are required but the nested
'y' property is not. This is because by the time :py:meth:`parse` is called,
:py:class:`~valideer.validators.Nullable` has already parsed its argument
with the default value of ``required_properties``. Several other builtin
validators work similarly to :py:class:`~valideer.validators.Nullable`,
accepting one or more schemas to parse. In order to parse an arbitrarily
complex nested validator with the same value for ``required_properties``
and/or ``additional_properties``, use the :py:func:`parsing` context
manager instead::
with V.parsing(required_properties=True):
v = V.parse({
"x": "integer",
"child": V.Nullable({
"y": "integer"
})
}) | [
"Try",
"to",
"parse",
"the",
"given",
"obj",
"as",
"a",
"validator",
"instance",
"."
]
| d35be173cb40c9fa1adb879673786b346b6841db | https://github.com/podio/valideer/blob/d35be173cb40c9fa1adb879673786b346b6841db/valideer/base.py#L60-L165 | train |
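
A runnable use of the parse entry point, following the docstring's own
nested-validator example (assumes the package is importable as valideer)::

    import valideer as V

    with V.parsing(required_properties=True):
        schema = V.parse({
            "x": "integer",
            "child": V.Nullable({"y": "integer"}),
        })

    schema.validate({"x": 1, "child": None})        # ok: child is nullable
    schema.validate({"x": 1, "child": {"y": 2}})    # ok: required y supplied
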
podio/valideer | valideer/base.py | parsing | def parsing(**kwargs):
"""
Context manager for overriding the default validator parsing rules for the
following code block.
"""
from .validators import Object
with _VALIDATOR_FACTORIES_LOCK:
old_values = {}
for key, value in iteritems(kwargs):
if value is not None:
attr = key.upper()
old_values[key] = getattr(Object, attr)
setattr(Object, attr, value)
try:
yield
finally:
for key, value in iteritems(kwargs):
if value is not None:
setattr(Object, key.upper(), old_values[key]) | python | def parsing(**kwargs):
"""
Context manager for overriding the default validator parsing rules for the
following code block.
"""
from .validators import Object
with _VALIDATOR_FACTORIES_LOCK:
old_values = {}
for key, value in iteritems(kwargs):
if value is not None:
attr = key.upper()
old_values[key] = getattr(Object, attr)
setattr(Object, attr, value)
try:
yield
finally:
for key, value in iteritems(kwargs):
if value is not None:
setattr(Object, key.upper(), old_values[key]) | [
"def",
"parsing",
"(",
"*",
"*",
"kwargs",
")",
":",
"from",
".",
"validators",
"import",
"Object",
"with",
"_VALIDATOR_FACTORIES_LOCK",
":",
"old_values",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"iteritems",
"(",
"kwargs",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"attr",
"=",
"key",
".",
"upper",
"(",
")",
"old_values",
"[",
"key",
"]",
"=",
"getattr",
"(",
"Object",
",",
"attr",
")",
"setattr",
"(",
"Object",
",",
"attr",
",",
"value",
")",
"try",
":",
"yield",
"finally",
":",
"for",
"key",
",",
"value",
"in",
"iteritems",
"(",
"kwargs",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"setattr",
"(",
"Object",
",",
"key",
".",
"upper",
"(",
")",
",",
"old_values",
"[",
"key",
"]",
")"
]
| Context manager for overriding the default validator parsing rules for the
following code block. | [
"Context",
"manager",
"for",
"overriding",
"the",
"default",
"validator",
"parsing",
"rules",
"for",
"the",
"following",
"code",
"block",
"."
]
| d35be173cb40c9fa1adb879673786b346b6841db | https://github.com/podio/valideer/blob/d35be173cb40c9fa1adb879673786b346b6841db/valideer/base.py#L169-L188 | train |
podio/valideer | valideer/base.py | register | def register(name, validator):
"""Register a validator instance under the given ``name``."""
if not isinstance(validator, Validator):
raise TypeError("Validator instance expected, %s given" % validator.__class__)
_NAMED_VALIDATORS[name] = validator | python | def register(name, validator):
"""Register a validator instance under the given ``name``."""
if not isinstance(validator, Validator):
raise TypeError("Validator instance expected, %s given" % validator.__class__)
_NAMED_VALIDATORS[name] = validator | [
"def",
"register",
"(",
"name",
",",
"validator",
")",
":",
"if",
"not",
"isinstance",
"(",
"validator",
",",
"Validator",
")",
":",
"raise",
"TypeError",
"(",
"\"Validator instance expected, %s given\"",
"%",
"validator",
".",
"__class__",
")",
"_NAMED_VALIDATORS",
"[",
"name",
"]",
"=",
"validator"
]
| Register a validator instance under the given ``name``. | [
"Register",
"a",
"validator",
"instance",
"under",
"the",
"given",
"name",
"."
]
| d35be173cb40c9fa1adb879673786b346b6841db | https://github.com/podio/valideer/blob/d35be173cb40c9fa1adb879673786b346b6841db/valideer/base.py#L191-L195 | train |
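
A small registration example: once registered, the instance can be referenced
by its string name inside any schema (the "even" validator is hypothetical and
assumes the Validator.validate(value, adapt=True) signature and ValidationError
type used elsewhere in this module)::

    import valideer as V

    class Even(V.Validator):
        def validate(self, value, adapt=True):
            if not isinstance(value, int) or value % 2:
                raise V.ValidationError("must be an even integer", value)
            return value

    V.register("even", Even())
    V.parse({"n": "even"}).validate({"n": 4})    # passes
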
podio/valideer | valideer/base.py | accepts | def accepts(**schemas):
"""Create a decorator for validating function parameters.
Example::
@accepts(a="number", body={"+field_ids": [int], "is_ok": bool})
def f(a, body):
print (a, body["field_ids"], body.get("is_ok"))
:param schemas: The schema for validating a given parameter.
"""
validate = parse(schemas).validate
@decorator
def validating(func, *args, **kwargs):
validate(inspect.getcallargs(func, *args, **kwargs), adapt=False)
return func(*args, **kwargs)
return validating | python | def accepts(**schemas):
"""Create a decorator for validating function parameters.
Example::
@accepts(a="number", body={"+field_ids": [int], "is_ok": bool})
def f(a, body):
print (a, body["field_ids"], body.get("is_ok"))
:param schemas: The schema for validating a given parameter.
"""
validate = parse(schemas).validate
@decorator
def validating(func, *args, **kwargs):
validate(inspect.getcallargs(func, *args, **kwargs), adapt=False)
return func(*args, **kwargs)
return validating | [
"def",
"accepts",
"(",
"*",
"*",
"schemas",
")",
":",
"validate",
"=",
"parse",
"(",
"schemas",
")",
".",
"validate",
"@",
"decorator",
"def",
"validating",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"validate",
"(",
"inspect",
".",
"getcallargs",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
",",
"adapt",
"=",
"False",
")",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"validating"
]
| Create a decorator for validating function parameters.
Example::
@accepts(a="number", body={"+field_ids": [int], "is_ok": bool})
def f(a, body):
print (a, body["field_ids"], body.get("is_ok"))
:param schemas: The schema for validating a given parameter. | [
"Create",
"a",
"decorator",
"for",
"validating",
"function",
"parameters",
"."
]
| d35be173cb40c9fa1adb879673786b346b6841db | https://github.com/podio/valideer/blob/d35be173cb40c9fa1adb879673786b346b6841db/valideer/base.py#L272-L289 | train |
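
A grounded usage of the decorator: arguments are validated (with adapt=False)
before the body runs, and a bad call raises ValidationError::

    import valideer as V

    @V.accepts(a="number", b="number")
    def add(a, b):
        return a + b

    add(1, 2.5)           # passes validation
    try:
        add(1, "2")       # "2" is not a number
    except V.ValidationError:
        pass
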
podio/valideer | valideer/base.py | returns | def returns(schema):
"""Create a decorator for validating function return value.
Example::
@accepts(a=int, b=int)
@returns(int)
def f(a, b):
return a + b
:param schema: The schema for adapting a given parameter.
"""
validate = parse(schema).validate
@decorator
def validating(func, *args, **kwargs):
ret = func(*args, **kwargs)
validate(ret, adapt=False)
return ret
return validating | python | def returns(schema):
"""Create a decorator for validating function return value.
Example::
@accepts(a=int, b=int)
@returns(int)
def f(a, b):
return a + b
:param schema: The schema for adapting a given parameter.
"""
validate = parse(schema).validate
@decorator
def validating(func, *args, **kwargs):
ret = func(*args, **kwargs)
validate(ret, adapt=False)
return ret
return validating | [
"def",
"returns",
"(",
"schema",
")",
":",
"validate",
"=",
"parse",
"(",
"schema",
")",
".",
"validate",
"@",
"decorator",
"def",
"validating",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"ret",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"validate",
"(",
"ret",
",",
"adapt",
"=",
"False",
")",
"return",
"ret",
"return",
"validating"
]
| Create a decorator for validating function return value.
Example::
@accepts(a=int, b=int)
@returns(int)
def f(a, b):
return a + b
:param schema: The schema for adapting a given parameter. | [
"Create",
"a",
"decorator",
"for",
"validating",
"function",
"return",
"value",
"."
]
| d35be173cb40c9fa1adb879673786b346b6841db | https://github.com/podio/valideer/blob/d35be173cb40c9fa1adb879673786b346b6841db/valideer/base.py#L292-L310 | train |
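The companion sketch for `returns`: the wrapped function runs first and its return value is validated afterwards, so a bad return surfaces as a validation error (the `V.ValidationError` name is assumed) at the call site.

import valideer as V

@V.returns("string")
def greeting(name):
    return 42  # deliberately the wrong type

try:
    greeting("bob")
except V.ValidationError as exc:
    print("bad return value:", exc)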
podio/valideer | valideer/base.py | adapts | def adapts(**schemas):
"""Create a decorator for validating and adapting function parameters.
Example::
@adapts(a="number", body={"+field_ids": [V.AdaptTo(int)], "is_ok": bool})
def f(a, body):
print (a, body.field_ids, body.is_ok)
:param schemas: The schema for adapting a given parameter.
"""
validate = parse(schemas).validate
@decorator
def adapting(func, *args, **kwargs):
adapted = validate(inspect.getcallargs(func, *args, **kwargs), adapt=True)
argspec = inspect.getargspec(func)
if argspec.varargs is argspec.keywords is None:
# optimization for the common no varargs, no keywords case
return func(**adapted)
adapted_varargs = adapted.pop(argspec.varargs, ())
adapted_keywords = adapted.pop(argspec.keywords, {})
if not adapted_varargs: # keywords only
if adapted_keywords:
adapted.update(adapted_keywords)
return func(**adapted)
adapted_posargs = [adapted[arg] for arg in argspec.args]
adapted_posargs.extend(adapted_varargs)
return func(*adapted_posargs, **adapted_keywords)
return adapting | python | def adapts(**schemas):
"""Create a decorator for validating and adapting function parameters.
Example::
@adapts(a="number", body={"+field_ids": [V.AdaptTo(int)], "is_ok": bool})
def f(a, body):
print (a, body.field_ids, body.is_ok)
:param schemas: The schema for adapting a given parameter.
"""
validate = parse(schemas).validate
@decorator
def adapting(func, *args, **kwargs):
adapted = validate(inspect.getcallargs(func, *args, **kwargs), adapt=True)
argspec = inspect.getargspec(func)
if argspec.varargs is argspec.keywords is None:
# optimization for the common no varargs, no keywords case
return func(**adapted)
adapted_varargs = adapted.pop(argspec.varargs, ())
adapted_keywords = adapted.pop(argspec.keywords, {})
if not adapted_varargs: # keywords only
if adapted_keywords:
adapted.update(adapted_keywords)
return func(**adapted)
adapted_posargs = [adapted[arg] for arg in argspec.args]
adapted_posargs.extend(adapted_varargs)
return func(*adapted_posargs, **adapted_keywords)
return adapting | [
"def",
"adapts",
"(",
"*",
"*",
"schemas",
")",
":",
"validate",
"=",
"parse",
"(",
"schemas",
")",
".",
"validate",
"@",
"decorator",
"def",
"adapting",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"adapted",
"=",
"validate",
"(",
"inspect",
".",
"getcallargs",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
",",
"adapt",
"=",
"True",
")",
"argspec",
"=",
"inspect",
".",
"getargspec",
"(",
"func",
")",
"if",
"argspec",
".",
"varargs",
"is",
"argspec",
".",
"keywords",
"is",
"None",
":",
"# optimization for the common no varargs, no keywords case",
"return",
"func",
"(",
"*",
"*",
"adapted",
")",
"adapted_varargs",
"=",
"adapted",
".",
"pop",
"(",
"argspec",
".",
"varargs",
",",
"(",
")",
")",
"adapted_keywords",
"=",
"adapted",
".",
"pop",
"(",
"argspec",
".",
"keywords",
",",
"{",
"}",
")",
"if",
"not",
"adapted_varargs",
":",
"# keywords only",
"if",
"adapted_keywords",
":",
"adapted",
".",
"update",
"(",
"adapted_keywords",
")",
"return",
"func",
"(",
"*",
"*",
"adapted",
")",
"adapted_posargs",
"=",
"[",
"adapted",
"[",
"arg",
"]",
"for",
"arg",
"in",
"argspec",
".",
"args",
"]",
"adapted_posargs",
".",
"extend",
"(",
"adapted_varargs",
")",
"return",
"func",
"(",
"*",
"adapted_posargs",
",",
"*",
"*",
"adapted_keywords",
")",
"return",
"adapting"
]
| Create a decorator for validating and adapting function parameters.
Example::
@adapts(a="number", body={"+field_ids": [V.AdaptTo(int)], "is_ok": bool})
def f(a, body):
print (a, body.field_ids, body.is_ok)
:param schemas: The schema for adapting a given parameter. | [
"Create",
"a",
"decorator",
"for",
"validating",
"and",
"adapting",
"function",
"parameters",
"."
]
| d35be173cb40c9fa1adb879673786b346b6841db | https://github.com/podio/valideer/blob/d35be173cb40c9fa1adb879673786b346b6841db/valideer/base.py#L313-L346 | train |
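A sketch of `adapts` with `V.AdaptTo(int)`: unlike `accepts`, the decorated function receives the adapted values, so a numeric string arrives as an int. Key access is used here; the attribute access shown in the docstring above depends on how the object schema is parsed.

import valideer as V

@V.adapts(body={"+count": V.AdaptTo(int)})
def increment(body):
    # body["count"] has already been adapted to an int.
    return body["count"] + 1

print(increment({"count": "41"}))  # 42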
HumanCellAtlas/dcp-cli | hca/upload/lib/client_side_checksum_handler.py | ClientSideChecksumHandler.get_checksum_metadata_tag | def get_checksum_metadata_tag(self):
""" Returns a map of checksum values by the name of the hashing function that produced it."""
if not self._checksums:
print("Warning: No checksums have been computed for this file.")
return {str(_hash_name): str(_hash_value) for _hash_name, _hash_value in self._checksums.items()} | python | def get_checksum_metadata_tag(self):
""" Returns a map of checksum values by the name of the hashing function that produced it."""
if not self._checksums:
print("Warning: No checksums have been computed for this file.")
return {str(_hash_name): str(_hash_value) for _hash_name, _hash_value in self._checksums.items()} | [
"def",
"get_checksum_metadata_tag",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_checksums",
":",
"print",
"(",
"\"Warning: No checksums have been computed for this file.\"",
")",
"return",
"{",
"str",
"(",
"_hash_name",
")",
":",
"str",
"(",
"_hash_value",
")",
"for",
"_hash_name",
",",
"_hash_value",
"in",
"self",
".",
"_checksums",
".",
"items",
"(",
")",
"}"
]
| Returns a map of checksum values by the name of the hashing function that produced it. | [
"Returns",
"a",
"map",
"of",
"checksum",
"values",
"by",
"the",
"name",
"of",
"the",
"hashing",
"function",
"that",
"produced",
"it",
"."
]
| cc70817bc4e50944c709eaae160de0bf7a19f0f3 | https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/upload/lib/client_side_checksum_handler.py#L20-L24 | train |
HumanCellAtlas/dcp-cli | hca/upload/lib/client_side_checksum_handler.py | ClientSideChecksumHandler.compute_checksum | def compute_checksum(self):
""" Calculates checksums for a given file. """
if self._filename.startswith("s3://"):
print("Warning: Did not perform client-side checksumming for file in S3. To be implemented.")
pass
else:
checksumCalculator = self.ChecksumCalculator(self._filename)
self._checksums = checksumCalculator.compute() | python | def compute_checksum(self):
""" Calculates checksums for a given file. """
if self._filename.startswith("s3://"):
print("Warning: Did not perform client-side checksumming for file in S3. To be implemented.")
pass
else:
checksumCalculator = self.ChecksumCalculator(self._filename)
self._checksums = checksumCalculator.compute() | [
"def",
"compute_checksum",
"(",
"self",
")",
":",
"if",
"self",
".",
"_filename",
".",
"startswith",
"(",
"\"s3://\"",
")",
":",
"print",
"(",
"\"Warning: Did not perform client-side checksumming for file in S3. To be implemented.\"",
")",
"pass",
"else",
":",
"checksumCalculator",
"=",
"self",
".",
"ChecksumCalculator",
"(",
"self",
".",
"_filename",
")",
"self",
".",
"_checksums",
"=",
"checksumCalculator",
".",
"compute",
"(",
")"
]
| Calculates checksums for a given file. | [
"Calculates",
"checksums",
"for",
"a",
"given",
"file",
"."
]
| cc70817bc4e50944c709eaae160de0bf7a19f0f3 | https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/upload/lib/client_side_checksum_handler.py#L26-L33 | train |
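The `ChecksumCalculator` used by `compute_checksum` is defined elsewhere and not shown in this file; the standalone sketch below produces the same style of digest map for a local file using only `hashlib`. The crc32c digest that the uploader also records is omitted because it needs a third-party package.

import hashlib

def compute_checksums(path, chunk_size=1024 * 1024):
    """Stream a file once and return hex digests keyed by hash name."""
    hashers = {"sha1": hashlib.sha1(), "sha256": hashlib.sha256()}
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            for hasher in hashers.values():
                hasher.update(chunk)
    return {name: hasher.hexdigest() for name, hasher in hashers.items()}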
HumanCellAtlas/dcp-cli | hca/upload/upload_area.py | UploadArea.upload_files | def upload_files(self, file_paths, file_size_sum=0, dcp_type="data", target_filename=None,
use_transfer_acceleration=True, report_progress=False, sync=True):
"""
A function that takes in a list of file paths and other optional args for parallel file upload
"""
self._setup_s3_agent_for_file_upload(file_count=len(file_paths),
file_size_sum=file_size_sum,
use_transfer_acceleration=use_transfer_acceleration)
pool = ThreadPool()
if report_progress:
print("\nStarting upload of %s files to upload area %s" % (len(file_paths), self.uuid))
for file_path in file_paths:
pool.add_task(self._upload_file, file_path,
target_filename=target_filename,
use_transfer_acceleration=use_transfer_acceleration,
report_progress=report_progress,
sync=sync)
pool.wait_for_completion()
if report_progress:
number_of_errors = len(self.s3agent.failed_uploads)
if number_of_errors == 0:
print(
"Completed upload of %d files to upload area %s\n" %
(self.s3agent.file_upload_completed_count, self.uuid))
else:
error = "\nThe following files failed:"
for k, v in self.s3agent.failed_uploads.items():
error += "\n%s: [Exception] %s" % (k, v)
error += "\nPlease retry or contact an hca administrator at [email protected] for help.\n"
raise UploadException(error) | python | def upload_files(self, file_paths, file_size_sum=0, dcp_type="data", target_filename=None,
use_transfer_acceleration=True, report_progress=False, sync=True):
"""
A function that takes in a list of file paths and other optional args for parallel file upload
"""
self._setup_s3_agent_for_file_upload(file_count=len(file_paths),
file_size_sum=file_size_sum,
use_transfer_acceleration=use_transfer_acceleration)
pool = ThreadPool()
if report_progress:
print("\nStarting upload of %s files to upload area %s" % (len(file_paths), self.uuid))
for file_path in file_paths:
pool.add_task(self._upload_file, file_path,
target_filename=target_filename,
use_transfer_acceleration=use_transfer_acceleration,
report_progress=report_progress,
sync=sync)
pool.wait_for_completion()
if report_progress:
number_of_errors = len(self.s3agent.failed_uploads)
if number_of_errors == 0:
print(
"Completed upload of %d files to upload area %s\n" %
(self.s3agent.file_upload_completed_count, self.uuid))
else:
error = "\nThe following files failed:"
for k, v in self.s3agent.failed_uploads.items():
error += "\n%s: [Exception] %s" % (k, v)
error += "\nPlease retry or contact an hca administrator at [email protected] for help.\n"
raise UploadException(error) | [
"def",
"upload_files",
"(",
"self",
",",
"file_paths",
",",
"file_size_sum",
"=",
"0",
",",
"dcp_type",
"=",
"\"data\"",
",",
"target_filename",
"=",
"None",
",",
"use_transfer_acceleration",
"=",
"True",
",",
"report_progress",
"=",
"False",
",",
"sync",
"=",
"True",
")",
":",
"self",
".",
"_setup_s3_agent_for_file_upload",
"(",
"file_count",
"=",
"len",
"(",
"file_paths",
")",
",",
"file_size_sum",
"=",
"file_size_sum",
",",
"use_transfer_acceleration",
"=",
"use_transfer_acceleration",
")",
"pool",
"=",
"ThreadPool",
"(",
")",
"if",
"report_progress",
":",
"print",
"(",
"\"\\nStarting upload of %s files to upload area %s\"",
"%",
"(",
"len",
"(",
"file_paths",
")",
",",
"self",
".",
"uuid",
")",
")",
"for",
"file_path",
"in",
"file_paths",
":",
"pool",
".",
"add_task",
"(",
"self",
".",
"_upload_file",
",",
"file_path",
",",
"target_filename",
"=",
"target_filename",
",",
"use_transfer_acceleration",
"=",
"use_transfer_acceleration",
",",
"report_progress",
"=",
"report_progress",
",",
"sync",
"=",
"sync",
")",
"pool",
".",
"wait_for_completion",
"(",
")",
"if",
"report_progress",
":",
"number_of_errors",
"=",
"len",
"(",
"self",
".",
"s3agent",
".",
"failed_uploads",
")",
"if",
"number_of_errors",
"==",
"0",
":",
"print",
"(",
"\"Completed upload of %d files to upload area %s\\n\"",
"%",
"(",
"self",
".",
"s3agent",
".",
"file_upload_completed_count",
",",
"self",
".",
"uuid",
")",
")",
"else",
":",
"error",
"=",
"\"\\nThe following files failed:\"",
"for",
"k",
",",
"v",
"in",
"self",
".",
"s3agent",
".",
"failed_uploads",
".",
"items",
"(",
")",
":",
"error",
"+=",
"\"\\n%s: [Exception] %s\"",
"%",
"(",
"k",
",",
"v",
")",
"error",
"+=",
"\"\\nPlease retry or contact an hca administrator at [email protected] for help.\\n\"",
"raise",
"UploadException",
"(",
"error",
")"
]
| A function that takes in a list of file paths and other optional args for parallel file upload | [
"A",
"function",
"that",
"takes",
"in",
"a",
"list",
"of",
"file",
"paths",
"and",
"other",
"optional",
"args",
"for",
"parallel",
"file",
"upload"
]
| cc70817bc4e50944c709eaae160de0bf7a19f0f3 | https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/upload/upload_area.py#L109-L138 | train |
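A hypothetical driver for `upload_files`: it assumes `area` is an already-constructed `hca.upload.UploadArea` and that pre-computing `file_size_sum` is how callers feed the progress reporting.

import os

def upload_all(area, paths):
    """Upload every local path through an existing UploadArea."""
    total = sum(os.path.getsize(p) for p in paths)
    area.upload_files(paths, file_size_sum=total, report_progress=True)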
HumanCellAtlas/dcp-cli | hca/upload/upload_area.py | UploadArea.validation_status | def validation_status(self, filename):
"""
Get status and results of latest validation job for a file.
:param str filename: The name of the file within the Upload Area
:return: a dict with validation information
:rtype: dict
:raises UploadApiException: if information could not be obtained
"""
return self.upload_service.api_client.validation_status(area_uuid=self.uuid, filename=filename) | python | def validation_status(self, filename):
"""
Get status and results of latest validation job for a file.
:param str filename: The name of the file within the Upload Area
:return: a dict with validation information
:rtype: dict
:raises UploadApiException: if information could not be obtained
"""
return self.upload_service.api_client.validation_status(area_uuid=self.uuid, filename=filename) | [
"def",
"validation_status",
"(",
"self",
",",
"filename",
")",
":",
"return",
"self",
".",
"upload_service",
".",
"api_client",
".",
"validation_status",
"(",
"area_uuid",
"=",
"self",
".",
"uuid",
",",
"filename",
"=",
"filename",
")"
]
| Get status and results of latest validation job for a file.
:param str filename: The name of the file within the Upload Area
:return: a dict with validation information
:rtype: dict
:raises UploadApiException: if information could not be obtained | [
"Get",
"status",
"and",
"results",
"of",
"latest",
"validation",
"job",
"for",
"a",
"file",
"."
]
| cc70817bc4e50944c709eaae160de0bf7a19f0f3 | https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/upload/upload_area.py#L181-L190 | train |
HumanCellAtlas/dcp-cli | hca/upload/lib/s3_agent.py | S3Agent._item_exists_in_bucket | def _item_exists_in_bucket(self, bucket, key, checksums):
""" Returns true if the key already exists in the current bucket and the clientside checksum matches the
file's checksums, and false otherwise."""
try:
obj = self.target_s3.meta.client.head_object(Bucket=bucket, Key=key)
if obj and obj.containsKey('Metadata'):
if obj['Metadata'] == checksums:
return True
except ClientError:
# An exception from calling `head_object` indicates that no file with the specified name could be found
# in the specified bucket.
return False | python | def _item_exists_in_bucket(self, bucket, key, checksums):
""" Returns true if the key already exists in the current bucket and the clientside checksum matches the
file's checksums, and false otherwise."""
try:
obj = self.target_s3.meta.client.head_object(Bucket=bucket, Key=key)
if obj and 'Metadata' in obj:
if obj['Metadata'] == checksums:
return True
except ClientError:
# An exception from calling `head_object` indicates that no file with the specified name could be found
# in the specified bucket.
return False | [
"def",
"_item_exists_in_bucket",
"(",
"self",
",",
"bucket",
",",
"key",
",",
"checksums",
")",
":",
"try",
":",
"obj",
"=",
"self",
".",
"target_s3",
".",
"meta",
".",
"client",
".",
"head_object",
"(",
"Bucket",
"=",
"bucket",
",",
"Key",
"=",
"key",
")",
"if",
"obj",
"and",
"obj",
".",
"containsKey",
"(",
"'Metadata'",
")",
":",
"if",
"obj",
"[",
"'Metadata'",
"]",
"==",
"checksums",
":",
"return",
"True",
"except",
"ClientError",
":",
"# An exception from calling `head_object` indicates that no file with the specified name could be found",
"# in the specified bucket.",
"return",
"False"
]
| Returns true if the key already exists in the current bucket and the clientside checksum matches the
file's checksums, and false otherwise. | [
"Returns",
"true",
"if",
"the",
"key",
"already",
"exists",
"in",
"the",
"current",
"bucket",
"and",
"the",
"clientside",
"checksum",
"matches",
"the",
"file",
"s",
"checksums",
"and",
"false",
"otherwise",
"."
]
| cc70817bc4e50944c709eaae160de0bf7a19f0f3 | https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/upload/lib/s3_agent.py#L130-L141 | train |
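For reference, the same existence-plus-metadata check against a plain boto3 client; bucket, key, and checksum values are caller-supplied placeholders. Note that `head_object` returns a plain dict, so key membership is tested with `in` rather than a `containsKey` method.

import boto3
from botocore.exceptions import ClientError

def object_matches(bucket, key, checksums):
    s3 = boto3.client("s3")
    try:
        head = s3.head_object(Bucket=bucket, Key=key)
    except ClientError:
        # head_object raises when the key does not exist (or access is denied).
        return False
    return head.get("Metadata") == checksums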
HumanCellAtlas/dcp-cli | hca/dss/upload_to_cloud.py | upload_to_cloud | def upload_to_cloud(file_handles, staging_bucket, replica, from_cloud=False):
"""
Upload files to cloud.
:param file_handles: If from_cloud, file_handles is an AWS S3 directory path to files with appropriate
metadata uploaded. Else, a list of binary file_handles to upload.
:param staging_bucket: The AWS bucket to upload the files to.
:param replica: The cloud replica to write to. One of 'aws', 'gc', or 'azure'. Currently has no effect.
:return: a list of file uuids, key-names, and absolute file paths (local) for uploaded files
"""
s3 = boto3.resource("s3")
file_uuids = []
key_names = []
abs_file_paths = []
if from_cloud:
file_uuids, key_names = _copy_from_s3(file_handles[0], s3)
else:
destination_bucket = s3.Bucket(staging_bucket)
for raw_fh in file_handles:
file_size = os.path.getsize(raw_fh.name)
multipart_chunksize = s3_multipart.get_s3_multipart_chunk_size(file_size)
tx_cfg = TransferConfig(multipart_threshold=s3_multipart.MULTIPART_THRESHOLD,
multipart_chunksize=multipart_chunksize)
with ChecksummingBufferedReader(raw_fh, multipart_chunksize) as fh:
file_uuid = str(uuid.uuid4())
key_name = "{}/{}".format(file_uuid, os.path.basename(fh.raw.name))
destination_bucket.upload_fileobj(
fh,
key_name,
Config=tx_cfg,
ExtraArgs={
'ContentType': _mime_type(fh.raw.name),
}
)
sums = fh.get_checksums()
metadata = {
"hca-dss-s3_etag": sums["s3_etag"],
"hca-dss-sha1": sums["sha1"],
"hca-dss-sha256": sums["sha256"],
"hca-dss-crc32c": sums["crc32c"],
}
s3.meta.client.put_object_tagging(Bucket=destination_bucket.name,
Key=key_name,
Tagging=dict(TagSet=encode_tags(metadata)))
file_uuids.append(file_uuid)
key_names.append(key_name)
abs_file_paths.append(fh.raw.name)
return file_uuids, key_names, abs_file_paths | python | def upload_to_cloud(file_handles, staging_bucket, replica, from_cloud=False):
"""
Upload files to cloud.
:param file_handles: If from_cloud, file_handles is an AWS S3 directory path to files with appropriate
metadata uploaded. Else, a list of binary file_handles to upload.
:param staging_bucket: The AWS bucket to upload the files to.
:param replica: The cloud replica to write to. One of 'aws', 'gc', or 'azure'. Currently has no effect.
:return: a list of file uuids, key-names, and absolute file paths (local) for uploaded files
"""
s3 = boto3.resource("s3")
file_uuids = []
key_names = []
abs_file_paths = []
if from_cloud:
file_uuids, key_names = _copy_from_s3(file_handles[0], s3)
else:
destination_bucket = s3.Bucket(staging_bucket)
for raw_fh in file_handles:
file_size = os.path.getsize(raw_fh.name)
multipart_chunksize = s3_multipart.get_s3_multipart_chunk_size(file_size)
tx_cfg = TransferConfig(multipart_threshold=s3_multipart.MULTIPART_THRESHOLD,
multipart_chunksize=multipart_chunksize)
with ChecksummingBufferedReader(raw_fh, multipart_chunksize) as fh:
file_uuid = str(uuid.uuid4())
key_name = "{}/{}".format(file_uuid, os.path.basename(fh.raw.name))
destination_bucket.upload_fileobj(
fh,
key_name,
Config=tx_cfg,
ExtraArgs={
'ContentType': _mime_type(fh.raw.name),
}
)
sums = fh.get_checksums()
metadata = {
"hca-dss-s3_etag": sums["s3_etag"],
"hca-dss-sha1": sums["sha1"],
"hca-dss-sha256": sums["sha256"],
"hca-dss-crc32c": sums["crc32c"],
}
s3.meta.client.put_object_tagging(Bucket=destination_bucket.name,
Key=key_name,
Tagging=dict(TagSet=encode_tags(metadata)))
file_uuids.append(file_uuid)
key_names.append(key_name)
abs_file_paths.append(fh.raw.name)
return file_uuids, key_names, abs_file_paths | [
"def",
"upload_to_cloud",
"(",
"file_handles",
",",
"staging_bucket",
",",
"replica",
",",
"from_cloud",
"=",
"False",
")",
":",
"s3",
"=",
"boto3",
".",
"resource",
"(",
"\"s3\"",
")",
"file_uuids",
"=",
"[",
"]",
"key_names",
"=",
"[",
"]",
"abs_file_paths",
"=",
"[",
"]",
"if",
"from_cloud",
":",
"file_uuids",
",",
"key_names",
"=",
"_copy_from_s3",
"(",
"file_handles",
"[",
"0",
"]",
",",
"s3",
")",
"else",
":",
"destination_bucket",
"=",
"s3",
".",
"Bucket",
"(",
"staging_bucket",
")",
"for",
"raw_fh",
"in",
"file_handles",
":",
"file_size",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"raw_fh",
".",
"name",
")",
"multipart_chunksize",
"=",
"s3_multipart",
".",
"get_s3_multipart_chunk_size",
"(",
"file_size",
")",
"tx_cfg",
"=",
"TransferConfig",
"(",
"multipart_threshold",
"=",
"s3_multipart",
".",
"MULTIPART_THRESHOLD",
",",
"multipart_chunksize",
"=",
"multipart_chunksize",
")",
"with",
"ChecksummingBufferedReader",
"(",
"raw_fh",
",",
"multipart_chunksize",
")",
"as",
"fh",
":",
"file_uuid",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"key_name",
"=",
"\"{}/{}\"",
".",
"format",
"(",
"file_uuid",
",",
"os",
".",
"path",
".",
"basename",
"(",
"fh",
".",
"raw",
".",
"name",
")",
")",
"destination_bucket",
".",
"upload_fileobj",
"(",
"fh",
",",
"key_name",
",",
"Config",
"=",
"tx_cfg",
",",
"ExtraArgs",
"=",
"{",
"'ContentType'",
":",
"_mime_type",
"(",
"fh",
".",
"raw",
".",
"name",
")",
",",
"}",
")",
"sums",
"=",
"fh",
".",
"get_checksums",
"(",
")",
"metadata",
"=",
"{",
"\"hca-dss-s3_etag\"",
":",
"sums",
"[",
"\"s3_etag\"",
"]",
",",
"\"hca-dss-sha1\"",
":",
"sums",
"[",
"\"sha1\"",
"]",
",",
"\"hca-dss-sha256\"",
":",
"sums",
"[",
"\"sha256\"",
"]",
",",
"\"hca-dss-crc32c\"",
":",
"sums",
"[",
"\"crc32c\"",
"]",
",",
"}",
"s3",
".",
"meta",
".",
"client",
".",
"put_object_tagging",
"(",
"Bucket",
"=",
"destination_bucket",
".",
"name",
",",
"Key",
"=",
"key_name",
",",
"Tagging",
"=",
"dict",
"(",
"TagSet",
"=",
"encode_tags",
"(",
"metadata",
")",
")",
")",
"file_uuids",
".",
"append",
"(",
"file_uuid",
")",
"key_names",
".",
"append",
"(",
"key_name",
")",
"abs_file_paths",
".",
"append",
"(",
"fh",
".",
"raw",
".",
"name",
")",
"return",
"file_uuids",
",",
"key_names",
",",
"abs_file_paths"
]
| Upload files to cloud.
:param file_handles: If from_cloud, file_handles is an AWS S3 directory path to files with appropriate
metadata uploaded. Else, a list of binary file_handles to upload.
:param staging_bucket: The AWS bucket to upload the files to.
:param replica: The cloud replica to write to. One of 'aws', 'gc', or 'azure'. Currently has no effect.
:return: a list of file uuids, key-names, and absolute file paths (local) for uploaded files | [
"Upload",
"files",
"to",
"cloud",
"."
]
| cc70817bc4e50944c709eaae160de0bf7a19f0f3 | https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/dss/upload_to_cloud.py#L53-L102 | train |
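The `encode_tags` helper is imported from elsewhere in the package and not shown here; since S3's `put_object_tagging` expects a `TagSet` of Key/Value dicts, a minimal equivalent presumably looks like this.

def encode_tags(metadata):
    """Convert a flat dict into the TagSet shape the S3 tagging API expects."""
    return [{"Key": k, "Value": v} for k, v in metadata.items()]

print(encode_tags({"hca-dss-sha1": "abc123"}))
# [{'Key': 'hca-dss-sha1', 'Value': 'abc123'}]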
HumanCellAtlas/dcp-cli | hca/dss/__init__.py | DSSClient.download | def download(self, bundle_uuid, replica, version="", download_dir="",
metadata_files=('*',), data_files=('*',),
num_retries=10, min_delay_seconds=0.25):
"""
Download a bundle and save it to the local filesystem as a directory.
:param str bundle_uuid: The uuid of the bundle to download
:param str replica: the replica to download from. The supported replicas are: `aws` for Amazon Web Services, and
`gcp` for Google Cloud Platform. [aws, gcp]
:param str version: The version to download; if not specified, the latest version is downloaded. The version is a
timestamp of bundle creation in RFC3339
:param str download_dir: The directory into which the bundle is downloaded
:param iterable metadata_files: one or more shell patterns against which all metadata files in the bundle will be
matched case-sensitively. A file is considered a metadata file if the `indexed` property in the manifest is
set. If and only if a metadata file matches any of the patterns in `metadata_files` will it be downloaded.
:param iterable data_files: one or more shell patterns against which all data files in the bundle will be matched
case-sensitively. A file is considered a data file if the `indexed` property in the manifest is not set. If and
only if a data file matches any of the patterns in `data_files` will it be downloaded.
:param int num_retries: The initial quota of download failures to accept before exiting due to
failures. The number of retries increases and decreases as file chunks succeed and fail.
:param float min_delay_seconds: The minimum number of seconds to wait in between retries.
By default, all data and metadata files are downloaded. To disable the downloading of data files,
use `--data-files ''` if using the CLI (or `data_files=()` if invoking `download` programmatically).
Likewise for metadata files.
If a retryable exception occurs, we wait a bit and retry again. The delay increases each time we fail and
decreases each time we successfully read a block. We set a quota for the number of failures that goes up with
every successful block read and down with each failure.
"""
errors = 0
with concurrent.futures.ThreadPoolExecutor(self.threads) as executor:
futures_to_dss_file = {executor.submit(task): dss_file
for dss_file, task in self._download_tasks(bundle_uuid,
replica,
version,
download_dir,
metadata_files,
data_files,
num_retries,
min_delay_seconds)}
for future in concurrent.futures.as_completed(futures_to_dss_file):
dss_file = futures_to_dss_file[future]
try:
future.result()
except Exception as e:
errors += 1
logger.warning('Failed to download file %s version %s from replica %s',
dss_file.uuid, dss_file.version, dss_file.replica, exc_info=e)
if errors:
raise RuntimeError('{} file(s) failed to download'.format(errors)) | python | def download(self, bundle_uuid, replica, version="", download_dir="",
metadata_files=('*',), data_files=('*',),
num_retries=10, min_delay_seconds=0.25):
"""
Download a bundle and save it to the local filesystem as a directory.
:param str bundle_uuid: The uuid of the bundle to download
:param str replica: the replica to download from. The supported replicas are: `aws` for Amazon Web Services, and
`gcp` for Google Cloud Platform. [aws, gcp]
:param str version: The version to download; if not specified, the latest version is downloaded. The version is a
timestamp of bundle creation in RFC3339
:param str download_dir: The directory into which the bundle is downloaded
:param iterable metadata_files: one or more shell patterns against which all metadata files in the bundle will be
matched case-sensitively. A file is considered a metadata file if the `indexed` property in the manifest is
set. If and only if a metadata file matches any of the patterns in `metadata_files` will it be downloaded.
:param iterable data_files: one or more shell patterns against which all data files in the bundle will be matched
case-sensitively. A file is considered a data file if the `indexed` property in the manifest is not set. If and
only if a data file matches any of the patterns in `data_files` will it be downloaded.
:param int num_retries: The initial quota of download failures to accept before exiting due to
failures. The number of retries increases and decreases as file chunks succeed and fail.
:param float min_delay_seconds: The minimum number of seconds to wait in between retries.
By default, all data and metadata files are downloaded. To disable the downloading of data files,
use `--data-files ''` if using the CLI (or `data_files=()` if invoking `download` programmatically).
Likewise for metadata files.
If a retryable exception occurs, we wait a bit and retry again. The delay increases each time we fail and
decreases each time we successfully read a block. We set a quota for the number of failures that goes up with
every successful block read and down with each failure.
"""
errors = 0
with concurrent.futures.ThreadPoolExecutor(self.threads) as executor:
futures_to_dss_file = {executor.submit(task): dss_file
for dss_file, task in self._download_tasks(bundle_uuid,
replica,
version,
download_dir,
metadata_files,
data_files,
num_retries,
min_delay_seconds)}
for future in concurrent.futures.as_completed(futures_to_dss_file):
dss_file = futures_to_dss_file[future]
try:
future.result()
except Exception as e:
errors += 1
logger.warning('Failed to download file %s version %s from replica %s',
dss_file.uuid, dss_file.version, dss_file.replica, exc_info=e)
if errors:
raise RuntimeError('{} file(s) failed to download'.format(errors)) | [
"def",
"download",
"(",
"self",
",",
"bundle_uuid",
",",
"replica",
",",
"version",
"=",
"\"\"",
",",
"download_dir",
"=",
"\"\"",
",",
"metadata_files",
"=",
"(",
"'*'",
",",
")",
",",
"data_files",
"=",
"(",
"'*'",
",",
")",
",",
"num_retries",
"=",
"10",
",",
"min_delay_seconds",
"=",
"0.25",
")",
":",
"errors",
"=",
"0",
"with",
"concurrent",
".",
"futures",
".",
"ThreadPoolExecutor",
"(",
"self",
".",
"threads",
")",
"as",
"executor",
":",
"futures_to_dss_file",
"=",
"{",
"executor",
".",
"submit",
"(",
"task",
")",
":",
"dss_file",
"for",
"dss_file",
",",
"task",
"in",
"self",
".",
"_download_tasks",
"(",
"bundle_uuid",
",",
"replica",
",",
"version",
",",
"download_dir",
",",
"metadata_files",
",",
"data_files",
",",
"num_retries",
",",
"min_delay_seconds",
")",
"}",
"for",
"future",
"in",
"concurrent",
".",
"futures",
".",
"as_completed",
"(",
"futures_to_dss_file",
")",
":",
"dss_file",
"=",
"futures_to_dss_file",
"[",
"future",
"]",
"try",
":",
"future",
".",
"result",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"errors",
"+=",
"1",
"logger",
".",
"warning",
"(",
"'Failed to download file %s version %s from replica %s'",
",",
"dss_file",
".",
"uuid",
",",
"dss_file",
".",
"version",
",",
"dss_file",
".",
"replica",
",",
"exc_info",
"=",
"e",
")",
"if",
"errors",
":",
"raise",
"RuntimeError",
"(",
"'{} file(s) failed to download'",
".",
"format",
"(",
"errors",
")",
")"
]
| Download a bundle and save it to the local filesystem as a directory.
:param str bundle_uuid: The uuid of the bundle to download
:param str replica: the replica to download from. The supported replicas are: `aws` for Amazon Web Services, and
`gcp` for Google Cloud Platform. [aws, gcp]
:param str version: The version to download; if not specified, the latest version is downloaded. The version is a
timestamp of bundle creation in RFC3339
:param str download_dir: The directory into which the bundle is downloaded
:param iterable metadata_files: one or more shell patterns against which all metadata files in the bundle will be
matched case-sensitively. A file is considered a metadata file if the `indexed` property in the manifest is
set. If and only if a metadata file matches any of the patterns in `metadata_files` will it be downloaded.
:param iterable data_files: one or more shell patterns against which all data files in the bundle will be matched
case-sensitively. A file is considered a data file if the `indexed` property in the manifest is not set. The
file will be downloaded only if a data file matches any of the patterns in `data_files` will it be
downloaded.
:param int num_retries: The initial quota of download failures to accept before exiting due to
failures. The number of retries increase and decrease as file chucks succeed and fail.
:param float min_delay_seconds: The minimum number of seconds to wait in between retries.
Download a bundle and save it to the local filesystem as a directory.
By default, all data and metadata files are downloaded. To disable the downloading of data files,
use `--data-files ''` if using the CLI (or `data_files=()` if invoking `download` programmatically).
Likewise for metadata files.
If a retryable exception occurs, we wait a bit and retry again. The delay increases each time we fail and
decreases each time we successfully read a block. We set a quota for the number of failures that goes up with
every successful block read and down with each failure. | [
"Download",
"a",
"bundle",
"and",
"save",
"it",
"to",
"the",
"local",
"filesystem",
"as",
"a",
"directory",
"."
]
| cc70817bc4e50944c709eaae160de0bf7a19f0f3 | https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/dss/__init__.py#L87-L140 | train |
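An illustrative call to `download`; the bundle UUID is a placeholder, and constructing `DSSClient` may require credentials or a non-default deployment in practice.

from hca.dss import DSSClient

client = DSSClient()
client.download(
    bundle_uuid="ffffffff-ffff-ffff-ffff-ffffffffffff",  # placeholder
    replica="aws",
    download_dir="bundles",
    data_files=(),  # fetch metadata only; skip the (potentially large) data files
)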
HumanCellAtlas/dcp-cli | hca/dss/__init__.py | DSSClient._download_file | def _download_file(self, dss_file, dest_path, num_retries=10, min_delay_seconds=0.25):
"""
Attempt to download the data. If a retryable exception occurs, we wait a bit and retry again. The delay
increases each time we fail and decreases each time we successfully read a block. We set a quota for the
number of failures that goes up with every successful block read and down with each failure.
If we can, we will attempt HTTP resume. However, we verify that the server supports HTTP resume. If the
ranged get doesn't yield the correct header, then we start over.
"""
directory, _ = os.path.split(dest_path)
if directory:
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
with atomic_write(dest_path, mode="wb", overwrite=True) as fh:
if dss_file.size == 0:
return
download_hash = self._do_download_file(dss_file, fh, num_retries, min_delay_seconds)
if download_hash.lower() != dss_file.sha256.lower():
# No need to delete what's been written. atomic_write ensures we're cleaned up
logger.error("%s", "File {}: GET FAILED. Checksum mismatch.".format(dss_file.uuid))
raise ValueError("Expected sha256 {} Received sha256 {}".format(
dss_file.sha256.lower(), download_hash.lower())) | python | def _download_file(self, dss_file, dest_path, num_retries=10, min_delay_seconds=0.25):
"""
Attempt to download the data. If a retryable exception occurs, we wait a bit and retry again. The delay
increases each time we fail and decreases each time we successfully read a block. We set a quota for the
number of failures that goes up with every successful block read and down with each failure.
If we can, we will attempt HTTP resume. However, we verify that the server supports HTTP resume. If the
ranged get doesn't yield the correct header, then we start over.
"""
directory, _ = os.path.split(dest_path)
if directory:
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
with atomic_write(dest_path, mode="wb", overwrite=True) as fh:
if dss_file.size == 0:
return
download_hash = self._do_download_file(dss_file, fh, num_retries, min_delay_seconds)
if download_hash.lower() != dss_file.sha256.lower():
# No need to delete what's been written. atomic_write ensures we're cleaned up
logger.error("%s", "File {}: GET FAILED. Checksum mismatch.".format(dss_file.uuid))
raise ValueError("Expected sha256 {} Received sha256 {}".format(
dss_file.sha256.lower(), download_hash.lower())) | [
"def",
"_download_file",
"(",
"self",
",",
"dss_file",
",",
"dest_path",
",",
"num_retries",
"=",
"10",
",",
"min_delay_seconds",
"=",
"0.25",
")",
":",
"directory",
",",
"_",
"=",
"os",
".",
"path",
".",
"split",
"(",
"dest_path",
")",
"if",
"directory",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"directory",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"!=",
"errno",
".",
"EEXIST",
":",
"raise",
"with",
"atomic_write",
"(",
"dest_path",
",",
"mode",
"=",
"\"wb\"",
",",
"overwrite",
"=",
"True",
")",
"as",
"fh",
":",
"if",
"dss_file",
".",
"size",
"==",
"0",
":",
"return",
"download_hash",
"=",
"self",
".",
"_do_download_file",
"(",
"dss_file",
",",
"fh",
",",
"num_retries",
",",
"min_delay_seconds",
")",
"if",
"download_hash",
".",
"lower",
"(",
")",
"!=",
"dss_file",
".",
"sha256",
".",
"lower",
"(",
")",
":",
"# No need to delete what's been written. atomic_write ensures we're cleaned up",
"logger",
".",
"error",
"(",
"\"%s\"",
",",
"\"File {}: GET FAILED. Checksum mismatch.\"",
".",
"format",
"(",
"dss_file",
".",
"uuid",
")",
")",
"raise",
"ValueError",
"(",
"\"Expected sha256 {} Received sha256 {}\"",
".",
"format",
"(",
"dss_file",
".",
"sha256",
".",
"lower",
"(",
")",
",",
"download_hash",
".",
"lower",
"(",
")",
")",
")"
]
| Attempt to download the data. If a retryable exception occurs, we wait a bit and retry again. The delay
increases each time we fail and decreases each time we successfully read a block. We set a quota for the
number of failures that goes up with every successful block read and down with each failure.
If we can, we will attempt HTTP resume. However, we verify that the server supports HTTP resume. If the
ranged get doesn't yield the correct header, then we start over. | [
"Attempt",
"to",
"download",
"the",
"data",
".",
"If",
"a",
"retryable",
"exception",
"occurs",
"we",
"wait",
"a",
"bit",
"and",
"retry",
"again",
".",
"The",
"delay",
"increases",
"each",
"time",
"we",
"fail",
"and",
"decreases",
"each",
"time",
"we",
"successfully",
"read",
"a",
"block",
".",
"We",
"set",
"a",
"quota",
"for",
"the",
"number",
"of",
"failures",
"that",
"goes",
"up",
"with",
"every",
"successful",
"block",
"read",
"and",
"down",
"with",
"each",
"failure",
"."
]
| cc70817bc4e50944c709eaae160de0bf7a19f0f3 | https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/dss/__init__.py#L211-L237 | train |
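The `atomic_write` used above appears to come from the `atomicwrites` package: content is written to a temporary file that is renamed into place only when the block exits cleanly, which is why a failed or checksum-mismatched download leaves no partial file behind. A toy demonstration:

from atomicwrites import atomic_write

with atomic_write("out.bin", mode="wb", overwrite=True) as fh:
    fh.write(b"payload")  # out.bin appears only if this block succeeds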
HumanCellAtlas/dcp-cli | hca/dss/__init__.py | DSSClient._do_download_file | def _do_download_file(self, dss_file, fh, num_retries, min_delay_seconds):
"""
Abstracts away complications for downloading a file, handles retries and delays, and computes its hash
"""
hasher = hashlib.sha256()
delay = min_delay_seconds
retries_left = num_retries
while True:
try:
response = self.get_file._request(
dict(uuid=dss_file.uuid, version=dss_file.version, replica=dss_file.replica),
stream=True,
headers={
'Range': "bytes={}-".format(fh.tell())
},
)
try:
if not response.ok:
logger.error("%s", "File {}: GET FAILED.".format(dss_file.uuid))
logger.error("%s", "Response: {}".format(response.text))
break
consume_bytes = int(fh.tell())
server_start = 0
content_range_header = response.headers.get('Content-Range', None)
if content_range_header is not None:
cre = re.compile("bytes (\d+)-(\d+)")
mo = cre.search(content_range_header)
if mo is not None:
server_start = int(mo.group(1))
consume_bytes -= server_start
assert consume_bytes >= 0
if server_start > 0 and consume_bytes == 0:
logger.info("%s", "File {}: Resuming at {}.".format(
dss_file.uuid, server_start))
elif consume_bytes > 0:
logger.info("%s", "File {}: Resuming at {}. Dropping {} bytes to match".format(
dss_file.uuid, server_start, consume_bytes))
while consume_bytes > 0:
bytes_to_read = min(consume_bytes, 1024*1024)
content = response.iter_content(chunk_size=bytes_to_read)
chunk = next(content)
if chunk:
consume_bytes -= len(chunk)
for chunk in response.iter_content(chunk_size=1024*1024):
if chunk:
fh.write(chunk)
hasher.update(chunk)
retries_left = min(retries_left + 1, num_retries)
delay = max(delay / 2, min_delay_seconds)
break
finally:
response.close()
except (ChunkedEncodingError, ConnectionError, ReadTimeout):
if retries_left > 0:
logger.info("%s", "File {}: GET FAILED. Attempting to resume.".format(dss_file.uuid))
time.sleep(delay)
delay *= 2
retries_left -= 1
continue
raise
return hasher.hexdigest() | python | def _do_download_file(self, dss_file, fh, num_retries, min_delay_seconds):
"""
Abstracts away complications for downloading a file, handles retries and delays, and computes its hash
"""
hasher = hashlib.sha256()
delay = min_delay_seconds
retries_left = num_retries
while True:
try:
response = self.get_file._request(
dict(uuid=dss_file.uuid, version=dss_file.version, replica=dss_file.replica),
stream=True,
headers={
'Range': "bytes={}-".format(fh.tell())
},
)
try:
if not response.ok:
logger.error("%s", "File {}: GET FAILED.".format(dss_file.uuid))
logger.error("%s", "Response: {}".format(response.text))
break
consume_bytes = int(fh.tell())
server_start = 0
content_range_header = response.headers.get('Content-Range', None)
if content_range_header is not None:
cre = re.compile("bytes (\d+)-(\d+)")
mo = cre.search(content_range_header)
if mo is not None:
server_start = int(mo.group(1))
consume_bytes -= server_start
assert consume_bytes >= 0
if server_start > 0 and consume_bytes == 0:
logger.info("%s", "File {}: Resuming at {}.".format(
dss_file.uuid, server_start))
elif consume_bytes > 0:
logger.info("%s", "File {}: Resuming at {}. Dropping {} bytes to match".format(
dss_file.uuid, server_start, consume_bytes))
while consume_bytes > 0:
bytes_to_read = min(consume_bytes, 1024*1024)
content = response.iter_content(chunk_size=bytes_to_read)
chunk = next(content)
if chunk:
consume_bytes -= len(chunk)
for chunk in response.iter_content(chunk_size=1024*1024):
if chunk:
fh.write(chunk)
hasher.update(chunk)
retries_left = min(retries_left + 1, num_retries)
delay = max(delay / 2, min_delay_seconds)
break
finally:
response.close()
except (ChunkedEncodingError, ConnectionError, ReadTimeout):
if retries_left > 0:
logger.info("%s", "File {}: GET FAILED. Attempting to resume.".format(dss_file.uuid))
time.sleep(delay)
delay *= 2
retries_left -= 1
continue
raise
return hasher.hexdigest() | [
"def",
"_do_download_file",
"(",
"self",
",",
"dss_file",
",",
"fh",
",",
"num_retries",
",",
"min_delay_seconds",
")",
":",
"hasher",
"=",
"hashlib",
".",
"sha256",
"(",
")",
"delay",
"=",
"min_delay_seconds",
"retries_left",
"=",
"num_retries",
"while",
"True",
":",
"try",
":",
"response",
"=",
"self",
".",
"get_file",
".",
"_request",
"(",
"dict",
"(",
"uuid",
"=",
"dss_file",
".",
"uuid",
",",
"version",
"=",
"dss_file",
".",
"version",
",",
"replica",
"=",
"dss_file",
".",
"replica",
")",
",",
"stream",
"=",
"True",
",",
"headers",
"=",
"{",
"'Range'",
":",
"\"bytes={}-\"",
".",
"format",
"(",
"fh",
".",
"tell",
"(",
")",
")",
"}",
",",
")",
"try",
":",
"if",
"not",
"response",
".",
"ok",
":",
"logger",
".",
"error",
"(",
"\"%s\"",
",",
"\"File {}: GET FAILED.\"",
".",
"format",
"(",
"dss_file",
".",
"uuid",
")",
")",
"logger",
".",
"error",
"(",
"\"%s\"",
",",
"\"Response: {}\"",
".",
"format",
"(",
"response",
".",
"text",
")",
")",
"break",
"consume_bytes",
"=",
"int",
"(",
"fh",
".",
"tell",
"(",
")",
")",
"server_start",
"=",
"0",
"content_range_header",
"=",
"response",
".",
"headers",
".",
"get",
"(",
"'Content-Range'",
",",
"None",
")",
"if",
"content_range_header",
"is",
"not",
"None",
":",
"cre",
"=",
"re",
".",
"compile",
"(",
"\"bytes (\\d+)-(\\d+)\"",
")",
"mo",
"=",
"cre",
".",
"search",
"(",
"content_range_header",
")",
"if",
"mo",
"is",
"not",
"None",
":",
"server_start",
"=",
"int",
"(",
"mo",
".",
"group",
"(",
"1",
")",
")",
"consume_bytes",
"-=",
"server_start",
"assert",
"consume_bytes",
">=",
"0",
"if",
"server_start",
">",
"0",
"and",
"consume_bytes",
"==",
"0",
":",
"logger",
".",
"info",
"(",
"\"%s\"",
",",
"\"File {}: Resuming at {}.\"",
".",
"format",
"(",
"dss_file",
".",
"uuid",
",",
"server_start",
")",
")",
"elif",
"consume_bytes",
">",
"0",
":",
"logger",
".",
"info",
"(",
"\"%s\"",
",",
"\"File {}: Resuming at {}. Dropping {} bytes to match\"",
".",
"format",
"(",
"dss_file",
".",
"uuid",
",",
"server_start",
",",
"consume_bytes",
")",
")",
"while",
"consume_bytes",
">",
"0",
":",
"bytes_to_read",
"=",
"min",
"(",
"consume_bytes",
",",
"1024",
"*",
"1024",
")",
"content",
"=",
"response",
".",
"iter_content",
"(",
"chunk_size",
"=",
"bytes_to_read",
")",
"chunk",
"=",
"next",
"(",
"content",
")",
"if",
"chunk",
":",
"consume_bytes",
"-=",
"len",
"(",
"chunk",
")",
"for",
"chunk",
"in",
"response",
".",
"iter_content",
"(",
"chunk_size",
"=",
"1024",
"*",
"1024",
")",
":",
"if",
"chunk",
":",
"fh",
".",
"write",
"(",
"chunk",
")",
"hasher",
".",
"update",
"(",
"chunk",
")",
"retries_left",
"=",
"min",
"(",
"retries_left",
"+",
"1",
",",
"num_retries",
")",
"delay",
"=",
"max",
"(",
"delay",
"/",
"2",
",",
"min_delay_seconds",
")",
"break",
"finally",
":",
"response",
".",
"close",
"(",
")",
"except",
"(",
"ChunkedEncodingError",
",",
"ConnectionError",
",",
"ReadTimeout",
")",
":",
"if",
"retries_left",
">",
"0",
":",
"logger",
".",
"info",
"(",
"\"%s\"",
",",
"\"File {}: GET FAILED. Attempting to resume.\"",
".",
"format",
"(",
"dss_file",
".",
"uuid",
")",
")",
"time",
".",
"sleep",
"(",
"delay",
")",
"delay",
"*=",
"2",
"retries_left",
"-=",
"1",
"continue",
"raise",
"return",
"hasher",
".",
"hexdigest",
"(",
")"
]
| Abstracts away complications for downloading a file, handles retries and delays, and computes its hash | [
"Abstracts",
"away",
"complications",
"for",
"downloading",
"a",
"file",
"handles",
"retries",
"and",
"delays",
"and",
"computes",
"its",
"hash"
]
| cc70817bc4e50944c709eaae160de0bf7a19f0f3 | https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/dss/__init__.py#L239-L303 | train |
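The retry policy inside `_do_download_file`, distilled into a standalone sketch: the failure quota is replenished (up to its cap) by each success, while the delay halves on success and doubles on failure. `read_block` stands in for one ranged GET.

import time

def read_with_backoff(read_block, num_retries=10, min_delay=0.25):
    delay, retries_left = min_delay, num_retries
    while True:
        try:
            read_block()
        except IOError:
            if retries_left <= 0:
                raise
            retries_left -= 1
            time.sleep(delay)
            delay *= 2
            continue
        # Success: replenish the quota and shrink the delay.
        retries_left = min(retries_left + 1, num_retries)
        delay = max(delay / 2, min_delay)
        return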
HumanCellAtlas/dcp-cli | hca/dss/__init__.py | DSSClient._write_output_manifest | def _write_output_manifest(self, manifest, filestore_root):
"""
Adds the file path column to the manifest and writes the copy to the current directory. If the original manifest
is in the current directory it is overwritten with a warning.
"""
output = os.path.basename(manifest)
fieldnames, source_manifest = self._parse_manifest(manifest)
if 'file_path' not in fieldnames:
fieldnames.append('file_path')
with atomic_write(output, overwrite=True) as f:
delimiter = b'\t' if USING_PYTHON2 else '\t'
writer = csv.DictWriter(f, fieldnames, delimiter=delimiter, quoting=csv.QUOTE_NONE)
writer.writeheader()
for row in source_manifest:
row['file_path'] = self._file_path(row['file_sha256'], filestore_root)
writer.writerow(row)
if os.path.isfile(output):
logger.warning('Overwriting manifest %s', output)
logger.info('Rewrote manifest %s with additional column containing path to downloaded files.', output) | python | def _write_output_manifest(self, manifest, filestore_root):
"""
Adds the file path column to the manifest and writes the copy to the current directory. If the original manifest
is in the current directory it is overwritten with a warning.
"""
output = os.path.basename(manifest)
fieldnames, source_manifest = self._parse_manifest(manifest)
if 'file_path' not in fieldnames:
fieldnames.append('file_path')
with atomic_write(output, overwrite=True) as f:
delimiter = b'\t' if USING_PYTHON2 else '\t'
writer = csv.DictWriter(f, fieldnames, delimiter=delimiter, quoting=csv.QUOTE_NONE)
writer.writeheader()
for row in source_manifest:
row['file_path'] = self._file_path(row['file_sha256'], filestore_root)
writer.writerow(row)
if os.path.isfile(output):
logger.warning('Overwriting manifest %s', output)
logger.info('Rewrote manifest %s with additional column containing path to downloaded files.', output) | [
"def",
"_write_output_manifest",
"(",
"self",
",",
"manifest",
",",
"filestore_root",
")",
":",
"output",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"manifest",
")",
"fieldnames",
",",
"source_manifest",
"=",
"self",
".",
"_parse_manifest",
"(",
"manifest",
")",
"if",
"'file_path'",
"not",
"in",
"fieldnames",
":",
"fieldnames",
".",
"append",
"(",
"'file_path'",
")",
"with",
"atomic_write",
"(",
"output",
",",
"overwrite",
"=",
"True",
")",
"as",
"f",
":",
"delimiter",
"=",
"b'\\t'",
"if",
"USING_PYTHON2",
"else",
"'\\t'",
"writer",
"=",
"csv",
".",
"DictWriter",
"(",
"f",
",",
"fieldnames",
",",
"delimiter",
"=",
"delimiter",
",",
"quoting",
"=",
"csv",
".",
"QUOTE_NONE",
")",
"writer",
".",
"writeheader",
"(",
")",
"for",
"row",
"in",
"source_manifest",
":",
"row",
"[",
"'file_path'",
"]",
"=",
"self",
".",
"_file_path",
"(",
"row",
"[",
"'file_sha256'",
"]",
",",
"filestore_root",
")",
"writer",
".",
"writerow",
"(",
"row",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"output",
")",
":",
"logger",
".",
"warning",
"(",
"'Overwriting manifest %s'",
",",
"output",
")",
"logger",
".",
"info",
"(",
"'Rewrote manifest %s with additional column containing path to downloaded files.'",
",",
"output",
")"
]
| Adds the file path column to the manifest and writes the copy to the current directory. If the original manifest
is in the current directory it is overwritten with a warning. | [
"Adds",
"the",
"file",
"path",
"column",
"to",
"the",
"manifest",
"and",
"writes",
"the",
"copy",
"to",
"the",
"current",
"directory",
".",
"If",
"the",
"original",
"manifest",
"is",
"in",
"the",
"current",
"directory",
"it",
"is",
"overwritten",
"with",
"a",
"warning",
"."
]
| cc70817bc4e50944c709eaae160de0bf7a19f0f3 | https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/dss/__init__.py#L332-L350 | train |
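A self-contained illustration of the manifest rewrite: append a `file_path` column while keeping the original TSV columns intact. The row contents and path scheme are made up; the real location comes from `self._file_path`, which is not shown here.

import csv
import io
import os

rows = [{"file_sha256": "ab" * 32, "file_name": "r1.fastq.gz"}]
fieldnames = ["file_sha256", "file_name", "file_path"]

buf = io.StringIO()
writer = csv.DictWriter(buf, fieldnames, delimiter="\t", quoting=csv.QUOTE_NONE)
writer.writeheader()
for row in rows:
    # Placeholder layout; the real path is derived from the file's checksum.
    row["file_path"] = os.path.join("downloads", row["file_sha256"])
    writer.writerow(row)
print(buf.getvalue())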
HumanCellAtlas/dcp-cli | hca/dss/util/__init__.py | hardlink | def hardlink(source, link_name):
"""
Create a hardlink in a portable way
The code for Windows support is adapted from:
https://github.com/sunshowers/ntfs/blob/master/ntfsutils/hardlink.py
"""
if sys.version_info < (3,) and platform.system() == 'Windows': # pragma: no cover
import ctypes
create_hard_link = ctypes.windll.kernel32.CreateHardLinkW
create_hard_link.argtypes = [ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_void_p]
create_hard_link.restype = ctypes.wintypes.BOOL
res = create_hard_link(link_name, source, None)
if res == 0:
raise ctypes.WinError()
else:
try:
os.link(source, link_name)
except OSError as e:
if e.errno != errno.EEXIST:
raise
else:
# It's possible that the user created a different file with the same name as the
# one we're trying to download. Thus we need to check if the inode is different
# and raise an error in this case.
source_stat = os.stat(source)
dest_stat = os.stat(link_name)
# Check device first because different drives can have the same inode number
if source_stat.st_dev != dest_stat.st_dev or source_stat.st_ino != dest_stat.st_ino:
raise | python | def hardlink(source, link_name):
"""
Create a hardlink in a portable way
The code for Windows support is adapted from:
https://github.com/sunshowers/ntfs/blob/master/ntfsutils/hardlink.py
"""
if sys.version_info < (3,) and platform.system() == 'Windows': # pragma: no cover
import ctypes
create_hard_link = ctypes.windll.kernel32.CreateHardLinkW
create_hard_link.argtypes = [ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_void_p]
create_hard_link.restype = ctypes.wintypes.BOOL
res = create_hard_link(link_name, source, None)
if res == 0:
raise ctypes.WinError()
else:
try:
os.link(source, link_name)
except OSError as e:
if e.errno != errno.EEXIST:
raise
else:
# It's possible that the user created a different file with the same name as the
# one we're trying to download. Thus we need to check if the inode is different
# and raise an error in this case.
source_stat = os.stat(source)
dest_stat = os.stat(link_name)
# Check device first because different drives can have the same inode number
if source_stat.st_dev != dest_stat.st_dev or source_stat.st_ino != dest_stat.st_ino:
raise | [
"def",
"hardlink",
"(",
"source",
",",
"link_name",
")",
":",
"if",
"sys",
".",
"version_info",
"<",
"(",
"3",
",",
")",
"and",
"platform",
".",
"system",
"(",
")",
"==",
"'Windows'",
":",
"# pragma: no cover",
"import",
"ctypes",
"create_hard_link",
"=",
"ctypes",
".",
"windll",
".",
"kernel32",
".",
"CreateHardLinkW",
"create_hard_link",
".",
"argtypes",
"=",
"[",
"ctypes",
".",
"c_wchar_p",
",",
"ctypes",
".",
"c_wchar_p",
",",
"ctypes",
".",
"c_void_p",
"]",
"create_hard_link",
".",
"restype",
"=",
"ctypes",
".",
"wintypes",
".",
"BOOL",
"res",
"=",
"create_hard_link",
"(",
"link_name",
",",
"source",
",",
"None",
")",
"if",
"res",
"==",
"0",
":",
"raise",
"ctypes",
".",
"WinError",
"(",
")",
"else",
":",
"try",
":",
"os",
".",
"link",
"(",
"source",
",",
"link_name",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"!=",
"errno",
".",
"EEXIST",
":",
"raise",
"else",
":",
"# It's possible that the user created a different file with the same name as the",
"# one we're trying to download. Thus we need to check the if the inode is different",
"# and raise an error in this case.",
"source_stat",
"=",
"os",
".",
"stat",
"(",
"source",
")",
"dest_stat",
"=",
"os",
".",
"stat",
"(",
"link_name",
")",
"# Check device first because different drives can have the same inode number",
"if",
"source_stat",
".",
"st_dev",
"!=",
"dest_stat",
".",
"st_dev",
"or",
"source_stat",
".",
"st_ino",
"!=",
"dest_stat",
".",
"st_ino",
":",
"raise"
]
| Create a hardlink in a portable way
The code for Windows support is adapted from:
https://github.com/sunshowers/ntfs/blob/master/ntfsutils/hardlink.py | [
"Create",
"a",
"hardlink",
"in",
"a",
"portable",
"way"
]
| cc70817bc4e50944c709eaae160de0bf7a19f0f3 | https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/dss/util/__init__.py#L40-L69 | train |
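A quick check of what `hardlink` guarantees, assuming the function defined above is in scope: after linking, both names resolve to the same device and inode, which `os.path.samefile` confirms, and re-linking the same pair is tolerated.

import os

with open("a.txt", "w") as fh:
    fh.write("data")

hardlink("a.txt", "b.txt")
print(os.path.samefile("a.txt", "b.txt"))  # True: same device and inode
hardlink("a.txt", "b.txt")                 # no-op when the same link already exists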
HumanCellAtlas/dcp-cli | hca/util/__init__.py | _ClientMethodFactory.request_with_retries_on_post_search | def request_with_retries_on_post_search(self, session, url, query, json_input, stream, headers):
"""
Submit a request and retry POST search requests specifically.
We don't currently retry on POST requests, and this is intended as a temporary fix until
the swagger is updated and changes applied to prod. In the meantime, this function will add
retries specifically for POST search (and any other POST requests will not be retried).
"""
# TODO: Revert this PR as soon as the appropriate swagger definitions have percolated up
# to prod and merged; see https://github.com/HumanCellAtlas/data-store/pull/1961
status_code = 500
if '/v1/search' in url:
retry_count = 10
else:
retry_count = 1
while status_code in (500, 502, 503, 504) and retry_count > 0:
try:
retry_count -= 1
res = session.request(self.http_method,
url,
params=query,
json=json_input,
stream=stream,
headers=headers,
timeout=self.client.timeout_policy)
status_code = res.status_code
except SwaggerAPIException:
if retry_count > 0:
pass
else:
raise
return res | python | def request_with_retries_on_post_search(self, session, url, query, json_input, stream, headers):
"""
Submit a request and retry POST search requests specifically.
We don't currently retry on POST requests, and this is intended as a temporary fix until
the swagger is updated and changes applied to prod. In the meantime, this function will add
retries specifically for POST search (and any other POST requests will not be retried).
"""
# TODO: Revert this PR as soon as the appropriate swagger definitions have percolated up
# to prod and merged; see https://github.com/HumanCellAtlas/data-store/pull/1961
status_code = 500
if '/v1/search' in url:
retry_count = 10
else:
retry_count = 1
while status_code in (500, 502, 503, 504) and retry_count > 0:
try:
retry_count -= 1
res = session.request(self.http_method,
url,
params=query,
json=json_input,
stream=stream,
headers=headers,
timeout=self.client.timeout_policy)
status_code = res.status_code
except SwaggerAPIException:
if retry_count > 0:
pass
else:
raise
return res | [
"def",
"request_with_retries_on_post_search",
"(",
"self",
",",
"session",
",",
"url",
",",
"query",
",",
"json_input",
",",
"stream",
",",
"headers",
")",
":",
"# TODO: Revert this PR as soon as the appropriate swagger definitions have percolated up",
"# to prod and merged; see https://github.com/HumanCellAtlas/data-store/pull/1961",
"status_code",
"=",
"500",
"if",
"'/v1/search'",
"in",
"url",
":",
"retry_count",
"=",
"10",
"else",
":",
"retry_count",
"=",
"1",
"while",
"status_code",
"in",
"(",
"500",
",",
"502",
",",
"503",
",",
"504",
")",
"and",
"retry_count",
">",
"0",
":",
"try",
":",
"retry_count",
"-=",
"1",
"res",
"=",
"session",
".",
"request",
"(",
"self",
".",
"http_method",
",",
"url",
",",
"params",
"=",
"query",
",",
"json",
"=",
"json_input",
",",
"stream",
"=",
"stream",
",",
"headers",
"=",
"headers",
",",
"timeout",
"=",
"self",
".",
"client",
".",
"timeout_policy",
")",
"status_code",
"=",
"res",
".",
"status_code",
"except",
"SwaggerAPIException",
":",
"if",
"retry_count",
">",
"0",
":",
"pass",
"else",
":",
"raise",
"return",
"res"
]
| Submit a request and retry POST search requests specifically.
We don't currently retry on POST requests, and this is intended as a temporary fix until
the swagger is updated and the changes are applied to prod. In the meantime, this function will add
retries specifically for POST search (and any other POST requests will not be retried). | [
"Submit",
"a",
"request",
"and",
"retry",
"POST",
"search",
"requests",
"specifically",
"."
]
| cc70817bc4e50944c709eaae160de0bf7a19f0f3 | https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/util/__init__.py#L143-L174 | train |
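The loop above seeds status_code with 500 so the request runs at least once, retries only the transient 5xx family, and re-raises only when the retry budget is exhausted. A standalone sketch of the same bounded retry-on-5xx pattern using only the requests library; the URL, payload, and timeout are placeholder assumptions:

import requests

def post_with_retries(url, json_body, retries=10, timeout=30):
    # status_code starts at 500 so the body executes at least once;
    # the loop stops on any non-5xx answer or when retries run out.
    status_code = 500
    while status_code in (500, 502, 503, 504) and retries > 0:
        retries -= 1
        res = requests.post(url, json=json_body, timeout=timeout)
        status_code = res.status_code
    return res

# res = post_with_retries("https://dss.example.org/v1/search", {"es_query": {}})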
HumanCellAtlas/dcp-cli | hca/util/__init__.py | SwaggerClient.refresh_swagger | def refresh_swagger(self):
"""
Manually refresh the swagger document. This can help resolve errors communicating with the API.
"""
try:
os.remove(self._get_swagger_filename(self.swagger_url))
except EnvironmentError as e:
logger.warn(os.strerror(e.errno))
else:
self.__init__() | python | def refresh_swagger(self):
"""
Manually refresh the swagger document. This can help resolve errors communicating with the API.
"""
try:
os.remove(self._get_swagger_filename(self.swagger_url))
except EnvironmentError as e:
logger.warn(os.strerror(e.errno))
else:
self.__init__() | [
"def",
"refresh_swagger",
"(",
"self",
")",
":",
"try",
":",
"os",
".",
"remove",
"(",
"self",
".",
"_get_swagger_filename",
"(",
"self",
".",
"swagger_url",
")",
")",
"except",
"EnvironmentError",
"as",
"e",
":",
"logger",
".",
"warn",
"(",
"os",
".",
"strerror",
"(",
"e",
".",
"errno",
")",
")",
"else",
":",
"self",
".",
"__init__",
"(",
")"
]
| Manually refresh the swagger document. This can help resolve errors communicating with the API. | [
"Manually",
"refresh",
"the",
"swagger",
"document",
".",
"This",
"can",
"help",
"resolve",
"errors",
"communicate",
"with",
"the",
"API",
"."
]
| cc70817bc4e50944c709eaae160de0bf7a19f0f3 | https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/util/__init__.py#L335-L344 | train |
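Calling the method is a one-liner once a concrete client exists. A hedged usage sketch, assuming hca.dss.DSSClient inherits this SwaggerClient method and accepts a zero-argument constructor:

from hca.dss import DSSClient

client = DSSClient()        # assumed zero-argument construction
client.refresh_swagger()    # delete the cached swagger file, then re-init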
HumanCellAtlas/dcp-cli | hca/upload/upload_config.py | UploadConfig.add_area | def add_area(self, uri):
"""
Record information about a new Upload Area
:param UploadAreaURI uri: An Upload Area URI.
"""
if uri.area_uuid not in self._config.upload.areas:
self._config.upload.areas[uri.area_uuid] = {'uri': uri.uri}
self.save() | python | def add_area(self, uri):
"""
Record information about a new Upload Area
:param UploadAreaURI uri: An Upload Area URI.
"""
if uri.area_uuid not in self._config.upload.areas:
self._config.upload.areas[uri.area_uuid] = {'uri': uri.uri}
self.save() | [
"def",
"add_area",
"(",
"self",
",",
"uri",
")",
":",
"if",
"uri",
".",
"area_uuid",
"not",
"in",
"self",
".",
"_config",
".",
"upload",
".",
"areas",
":",
"self",
".",
"_config",
".",
"upload",
".",
"areas",
"[",
"uri",
".",
"area_uuid",
"]",
"=",
"{",
"'uri'",
":",
"uri",
".",
"uri",
"}",
"self",
".",
"save",
"(",
")"
]
| Record information about a new Upload Area
:param UploadAreaURI uri: An Upload Area URI. | [
"Record",
"information",
"about",
"a",
"new",
"Upload",
"Area"
]
| cc70817bc4e50944c709eaae160de0bf7a19f0f3 | https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/upload/upload_config.py#L97-L105 | train |
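add_area is an idempotent insert keyed by the area UUID, followed by a save. A self-contained sketch of the same pattern, with a plain dict standing in for the config object and a namedtuple standing in for UploadAreaURI (both stand-ins and the UUID are illustrative):

from collections import namedtuple

AreaURI = namedtuple("AreaURI", ["area_uuid", "uri"])  # stand-in for UploadAreaURI

def add_area(areas, uri):
    # Record the area only the first time its UUID is seen.
    if uri.area_uuid not in areas:
        areas[uri.area_uuid] = {"uri": uri.uri}

areas = {}
uri = AreaURI("3b2c8f0e-0000-4000-8000-000000000000",
              "s3://example-upload-bucket/3b2c8f0e-0000-4000-8000-000000000000/")
add_area(areas, uri)
add_area(areas, uri)  # second call is a no-op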
HumanCellAtlas/dcp-cli | hca/upload/upload_config.py | UploadConfig.select_area | def select_area(self, area_uuid):
"""
Update the "current area" to be the area with this UUID.
:param str area_uuid: The RFC4122-compliant UUID of the Upload Area.
"""
self._config.upload.current_area = area_uuid
self.save() | python | def select_area(self, area_uuid):
"""
Update the "current area" to be the area with this UUID.
:param str area_uuid: The RFC4122-compliant UUID of the Upload Area.
"""
self._config.upload.current_area = area_uuid
self.save() | [
"def",
"select_area",
"(",
"self",
",",
"area_uuid",
")",
":",
"self",
".",
"_config",
".",
"upload",
".",
"current_area",
"=",
"area_uuid",
"self",
".",
"save",
"(",
")"
]
| Update the "current area" to be the area with this UUID.
:param str area_uuid: The RFC4122-compliant UUID of the Upload Area. | [
"Update",
"the",
"current",
"area",
"to",
"be",
"the",
"area",
"with",
"this",
"UUID",
"."
]
| cc70817bc4e50944c709eaae160de0bf7a19f0f3 | https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/upload/upload_config.py#L107-L115 | train |
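A hedged usage sketch for select_area; it assumes UploadConfig can be constructed with no arguments (the real signature lives in hca/upload/upload_config.py) and the UUID is illustrative:

from hca.upload.upload_config import UploadConfig

config = UploadConfig()  # assumed zero-argument constructor
config.select_area("3b2c8f0e-0000-4000-8000-000000000000")
# Subsequent upload operations now default to this area.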
HumanCellAtlas/dcp-cli | hca/upload/lib/api_client.py | ApiClient.create_area | def create_area(self, area_uuid):
"""
Create an Upload Area
:param str area_uuid: A RFC4122-compliant ID for the upload area
:return: a dict of the form { "uri": "s3://<bucket_name>/<upload-area-id>/" }
:rtype: dict
:raises UploadApiException: if an Upload Area was not created
"""
response = self._make_request('post',
path="/area/{id}".format(id=area_uuid),
headers={'Api-Key': self.auth_token})
return response.json() | python | def create_area(self, area_uuid):
"""
Create an Upload Area
:param str area_uuid: A RFC4122-compliant ID for the upload area
:return: a dict of the form { "uri": "s3://<bucket_name>/<upload-area-id>/" }
:rtype: dict
:raises UploadApiException: if an Upload Area was not created
"""
response = self._make_request('post',
path="/area/{id}".format(id=area_uuid),
headers={'Api-Key': self.auth_token})
return response.json() | [
"def",
"create_area",
"(",
"self",
",",
"area_uuid",
")",
":",
"response",
"=",
"self",
".",
"_make_request",
"(",
"'post'",
",",
"path",
"=",
"\"/area/{id}\"",
".",
"format",
"(",
"id",
"=",
"area_uuid",
")",
",",
"headers",
"=",
"{",
"'Api-Key'",
":",
"self",
".",
"auth_token",
"}",
")",
"return",
"response",
".",
"json",
"(",
")"
]
| Create an Upload Area
:param str area_uuid: A RFC4122-compliant ID for the upload area
:return: a dict of the form { "uri": "s3://<bucket_name>/<upload-area-id>/" }
:rtype: dict
:raises UploadApiException: if an Upload Area was not created | [
"Create",
"an",
"Upload",
"Area"
]
| cc70817bc4e50944c709eaae160de0bf7a19f0f3 | https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/upload/lib/api_client.py#L35-L47 | train |
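create_area wraps a POST to /area/{id} authenticated with an Api-Key header. A standalone sketch of that call using requests directly; the host and key are placeholders:

import uuid
import requests

UPLOAD_API = "https://upload.example.org/v1"  # placeholder host
area_uuid = str(uuid.uuid4())                 # RFC4122-compliant ID

resp = requests.post(f"{UPLOAD_API}/area/{area_uuid}",
                     headers={"Api-Key": "<your-api-key>"})
resp.raise_for_status()    # stands in for the client's UploadApiException
print(resp.json()["uri"])  # e.g. s3://<bucket_name>/<area_uuid>/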
HumanCellAtlas/dcp-cli | hca/upload/lib/api_client.py | ApiClient.area_exists | def area_exists(self, area_uuid):
"""
Check if an Upload Area exists
:param str area_uuid: A RFC4122-compliant ID for the upload area
:return: True or False
:rtype: bool
"""
response = requests.head(self._url(path="/area/{id}".format(id=area_uuid)))
return response.ok | python | def area_exists(self, area_uuid):
"""
Check if an Upload Area exists
:param str area_uuid: A RFC4122-compliant ID for the upload area
:return: True or False
:rtype: bool
"""
response = requests.head(self._url(path="/area/{id}".format(id=area_uuid)))
return response.ok | [
"def",
"area_exists",
"(",
"self",
",",
"area_uuid",
")",
":",
"response",
"=",
"requests",
".",
"head",
"(",
"self",
".",
"_url",
"(",
"path",
"=",
"\"/area/{id}\"",
".",
"format",
"(",
"id",
"=",
"area_uuid",
")",
")",
")",
"return",
"response",
".",
"ok"
]
| Check if an Upload Area exists
:param str area_uuid: A RFC4122-compliant ID for the upload area
:return: True or False
:rtype: bool | [
"Check",
"if",
"an",
"Upload",
"Area",
"exists"
]
| cc70817bc4e50944c709eaae160de0bf7a19f0f3 | https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/upload/lib/api_client.py#L49-L58 | train |
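Existence is probed with a bare HEAD request, and response.ok is True for any 2xx status, which is exactly what the method returns. A standalone sketch with a placeholder host:

import requests

def area_exists(base_url, area_uuid):
    # HEAD avoids transferring a body; .ok is True for 2xx statuses.
    return requests.head(f"{base_url}/area/{area_uuid}").ok

# area_exists("https://upload.example.org/v1",
#             "3b2c8f0e-0000-4000-8000-000000000000")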
HumanCellAtlas/dcp-cli | hca/upload/lib/api_client.py | ApiClient.delete_area | def delete_area(self, area_uuid):
"""
Delete an Upload Area
:param str area_uuid: A RFC4122-compliant ID for the upload area
:return: True
:rtype: bool
:raises UploadApiException: if an Upload Area was not deleted
"""
self._make_request('delete', path="/area/{id}".format(id=area_uuid),
headers={'Api-Key': self.auth_token})
return True | python | def delete_area(self, area_uuid):
"""
Delete an Upload Area
:param str area_uuid: A RFC4122-compliant ID for the upload area
:return: True
:rtype: bool
:raises UploadApiException: if an Upload Area was not deleted
"""
self._make_request('delete', path="/area/{id}".format(id=area_uuid),
headers={'Api-Key': self.auth_token})
return True | [
"def",
"delete_area",
"(",
"self",
",",
"area_uuid",
")",
":",
"self",
".",
"_make_request",
"(",
"'delete'",
",",
"path",
"=",
"\"/area/{id}\"",
".",
"format",
"(",
"id",
"=",
"area_uuid",
")",
",",
"headers",
"=",
"{",
"'Api-Key'",
":",
"self",
".",
"auth_token",
"}",
")",
"return",
"True"
]
| Delete an Upload Area
:param str area_uuid: A RFC4122-compliant ID for the upload area
:return: True
:rtype: bool
:raises UploadApiException: if an Upload Area was not deleted | [
"Delete",
"an",
"Upload",
"Area"
]
| cc70817bc4e50944c709eaae160de0bf7a19f0f3 | https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/upload/lib/api_client.py#L60-L71 | train |
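Deletion mirrors creation: a DELETE to the same /area/{id} path with the Api-Key header. A requests sketch with placeholder host, key, and UUID:

import requests

UPLOAD_API = "https://upload.example.org/v1"        # placeholder host
area_uuid = "3b2c8f0e-0000-4000-8000-000000000000"  # illustrative

resp = requests.delete(f"{UPLOAD_API}/area/{area_uuid}",
                       headers={"Api-Key": "<your-api-key>"})
resp.raise_for_status()  # stands in for the client's UploadApiException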
HumanCellAtlas/dcp-cli | hca/upload/lib/api_client.py | ApiClient.credentials | def credentials(self, area_uuid):
"""
Get AWS credentials required to directly upload files to an Upload Area in S3
:param str area_uuid: A RFC4122-compliant ID for the upload area
:return: a dict containing an AWS AccessKey, SecretKey and SessionToken
:rtype: dict
:raises UploadApiException: if credentials could not be obtained
"""
response = self._make_request("post", path="/area/{uuid}/credentials".format(uuid=area_uuid))
return response.json() | python | def credentials(self, area_uuid):
"""
Get AWS credentials required to directly upload files to an Upload Area in S3
:param str area_uuid: A RFC4122-compliant ID for the upload area
:return: a dict containing an AWS AccessKey, SecretKey and SessionToken
:rtype: dict
:raises UploadApiException: if credentials could not be obtained
"""
response = self._make_request("post", path="/area/{uuid}/credentials".format(uuid=area_uuid))
return response.json() | [
"def",
"credentials",
"(",
"self",
",",
"area_uuid",
")",
":",
"response",
"=",
"self",
".",
"_make_request",
"(",
"\"post\"",
",",
"path",
"=",
"\"/area/{uuid}/credentials\"",
".",
"format",
"(",
"uuid",
"=",
"area_uuid",
")",
")",
"return",
"response",
".",
"json",
"(",
")"
]
| Get AWS credentials required to directly upload files to an Upload Area in S3
:param str area_uuid: A RFC4122-compliant ID for the upload area
:return: a dict containing an AWS AccessKey, SecretKey and SessionToken
:rtype: dict
:raises UploadApiException: if credentials could not be obtained | [
"Get",
"AWS",
"credentials",
"required",
"to",
"directly",
"upload",
"files",
"to",
"Upload",
"Area",
"in",
"S3"
]
| cc70817bc4e50944c709eaae160de0bf7a19f0f3 | https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/upload/lib/api_client.py#L73-L83 | train |
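The returned dict carries temporary STS-style credentials that can be handed to boto3 for direct S3 uploads. In the sketch below the host is a placeholder and the credential key names ("AccessKeyId" and so on) are assumptions inferred from the docstring, not confirmed field names:

import boto3
import requests

UPLOAD_API = "https://upload.example.org/v1"        # placeholder host
area_uuid = "3b2c8f0e-0000-4000-8000-000000000000"  # illustrative

creds = requests.post(f"{UPLOAD_API}/area/{area_uuid}/credentials").json()
s3 = boto3.client(
    "s3",
    aws_access_key_id=creds["AccessKeyId"],          # assumed key name
    aws_secret_access_key=creds["SecretAccessKey"],  # assumed key name
    aws_session_token=creds["SessionToken"],         # assumed key name
)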
HumanCellAtlas/dcp-cli | hca/upload/lib/api_client.py | ApiClient.file_upload_notification | def file_upload_notification(self, area_uuid, filename):
"""
Notify Upload Service that a file has been placed in an Upload Area
:param str area_uuid: A RFC4122-compliant ID for the upload area
:param str filename: The name of the file in the Upload Area
:return: True
:rtype: bool
:raises UploadApiException: if file could not be stored
"""
url_safe_filename = urlparse.quote(filename)
path = ("/area/{area_uuid}/{filename}".format(area_uuid=area_uuid, filename=url_safe_filename))
response = self._make_request('post', path=path)
return response.ok | python | def file_upload_notification(self, area_uuid, filename):
"""
Notify Upload Service that a file has been placed in an Upload Area
:param str area_uuid: A RFC4122-compliant ID for the upload area
:param str filename: The name of the file in the Upload Area
:return: True
:rtype: bool
:raises UploadApiException: if file could not be stored
"""
url_safe_filename = urlparse.quote(filename)
path = ("/area/{area_uuid}/{filename}".format(area_uuid=area_uuid, filename=url_safe_filename))
response = self._make_request('post', path=path)
return response.ok | [
"def",
"file_upload_notification",
"(",
"self",
",",
"area_uuid",
",",
"filename",
")",
":",
"url_safe_filename",
"=",
"urlparse",
".",
"quote",
"(",
"filename",
")",
"path",
"=",
"(",
"\"/area/{area_uuid}/{filename}\"",
".",
"format",
"(",
"area_uuid",
"=",
"area_uuid",
",",
"filename",
"=",
"url_safe_filename",
")",
")",
"response",
"=",
"self",
".",
"_make_request",
"(",
"'post'",
",",
"path",
"=",
"path",
")",
"return",
"response",
".",
"ok"
]
| Notify Upload Service that a file has been placed in an Upload Area
:param str area_uuid: A RFC4122-compliant ID for the upload area
:param str filename: The name of the file in the Upload Area
:return: True
:rtype: bool
:raises UploadApiException: if file could not be stored | [
"Notify",
"Upload",
"Service",
"that",
"a",
"file",
"has",
"been",
"placed",
"in",
"an",
"Upload",
"Area"
]
| cc70817bc4e50944c709eaae160de0bf7a19f0f3 | https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/upload/lib/api_client.py#L111-L124 | train |
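Note that the filename is percent-encoded before being interpolated into the path. A Python 3 sketch of the same notification call; the host, UUID, and filename are placeholders, and urllib.parse.quote stands in for the record's urlparse.quote:

import requests
from urllib.parse import quote

UPLOAD_API = "https://upload.example.org/v1"        # placeholder host
area_uuid = "3b2c8f0e-0000-4000-8000-000000000000"  # illustrative
filename = quote("SRR0000001 reads.fastq.gz")       # spaces become %20 etc.

ok = requests.post(f"{UPLOAD_API}/area/{area_uuid}/{filename}").ok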
HumanCellAtlas/dcp-cli | hca/upload/lib/api_client.py | ApiClient.files_info | def files_info(self, area_uuid, file_list):
"""
Get information about files
:param str area_uuid: A RFC4122-compliant ID for the upload area
:param list file_list: The names of the files in the Upload Area about which we want information
:return: an array of file information dicts
:rtype: list of dicts
:raises UploadApiException: if information could not be obtained
"""
path = "/area/{uuid}/files_info".format(uuid=area_uuid)
file_list = [urlparse.quote(filename) for filename in file_list]
response = self._make_request('put', path=path, json=file_list)
return response.json() | python | def files_info(self, area_uuid, file_list):
"""
Get information about files
:param str area_uuid: A RFC4122-compliant ID for the upload area
:param list file_list: The names of the files in the Upload Area about which we want information
:return: an array of file information dicts
:rtype: list of dicts
:raises UploadApiException: if information could not be obtained
"""
path = "/area/{uuid}/files_info".format(uuid=area_uuid)
file_list = [urlparse.quote(filename) for filename in file_list]
response = self._make_request('put', path=path, json=file_list)
return response.json() | [
"def",
"files_info",
"(",
"self",
",",
"area_uuid",
",",
"file_list",
")",
":",
"path",
"=",
"\"/area/{uuid}/files_info\"",
".",
"format",
"(",
"uuid",
"=",
"area_uuid",
")",
"file_list",
"=",
"[",
"urlparse",
".",
"quote",
"(",
"filename",
")",
"for",
"filename",
"in",
"file_list",
"]",
"response",
"=",
"self",
".",
"_make_request",
"(",
"'put'",
",",
"path",
"=",
"path",
",",
"json",
"=",
"file_list",
")",
"return",
"response",
".",
"json",
"(",
")"
]
| Get information about files
:param str area_uuid: A RFC4122-compliant ID for the upload area
:param list file_list: The names of the files in the Upload Area about which we want information
:return: an array of file information dicts
:rtype: list of dicts
:raises UploadApiException: if information could not be obtained | [
"Get",
"information",
"about",
"files"
]
| cc70817bc4e50944c709eaae160de0bf7a19f0f3 | https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/upload/lib/api_client.py#L126-L139 | train |
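files_info batches the lookup: each filename is percent-encoded client-side and the list is PUT as a JSON array. A standalone sketch with placeholder host, UUID, and filenames:

import requests
from urllib.parse import quote

UPLOAD_API = "https://upload.example.org/v1"        # placeholder host
area_uuid = "3b2c8f0e-0000-4000-8000-000000000000"  # illustrative
files = [quote(name) for name in ["reads_R1.fastq.gz", "reads_R2.fastq.gz"]]

info = requests.put(f"{UPLOAD_API}/area/{area_uuid}/files_info", json=files)
info.raise_for_status()
print(info.json())  # list of per-file info dicts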