Code | Summary
---|---|
Please provide a description of the function:def get_deployment_timestamp():
# TODO: Support other deployment situations.
if os.environ.get('SERVER_SOFTWARE', '').startswith('Google App Engine'):
version_id = os.environ.get('CURRENT_VERSION_ID')
major_version, timestamp = version_id.split('.', 1)
return timestamp
return 'test' | [
"Returns a unique string represeting the current deployment.\n\n Used for busting caches.\n "
]
|
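As the summary notes, the value is meant for cache busting. A minimal sketch of that usage, assuming the function is imported wherever static asset URLs are built (the asset path below is hypothetical):

# Hypothetical cache-busting usage: append the deployment timestamp so a
# new deployment invalidates previously cached copies of the asset.
suffix = get_deployment_timestamp()
stylesheet_url = '/static/main.css?v=%s' % suffix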
Please provide a description of the function:def register(coordinator):
if FLAGS.phantomjs_script:
utils.verify_binary('phantomjs_binary', ['--version'])
assert os.path.exists(FLAGS.phantomjs_script)
else:
utils.verify_binary('capture_binary', ['--version'])
assert FLAGS.capture_script
assert os.path.exists(FLAGS.capture_script)
assert FLAGS.capture_threads > 0
assert FLAGS.queue_server_prefix
item = queue_worker.RemoteQueueWorkflow(
constants.CAPTURE_QUEUE_NAME,
DoCaptureQueueWorkflow,
max_tasks=FLAGS.capture_threads,
wait_seconds=FLAGS.capture_wait_seconds)
item.root = True
coordinator.input_queue.put(item) | [
"Registers this module as a worker with the given coordinator."
]
|
Please provide a description of the function:def real_main(new_url=None,
baseline_url=None,
upload_build_id=None,
upload_release_name=None):
coordinator = workers.get_coordinator()
fetch_worker.register(coordinator)
coordinator.start()
item = UrlPairDiff(
new_url,
baseline_url,
upload_build_id,
upload_release_name=upload_release_name,
heartbeat=workers.PrintWorkflow)
item.root = True
coordinator.input_queue.put(item)
coordinator.wait_one()
coordinator.stop()
coordinator.join() | [
"Runs the ur_pair_diff."
]
|
Please provide a description of the function:def fetch_internal(item, request):
# Break client dependence on Flask if internal fetches aren't being used.
from flask import make_response
from werkzeug.test import EnvironBuilder
# Break circular dependencies.
from dpxdt.server import app
# Attempt to create a Flask environment from a urllib2.Request object.
environ_base = {
'REMOTE_ADDR': '127.0.0.1',
}
# The data object may be a generator from poster.multipart_encode, so we
# need to convert that to raw bytes here. Unfortunately EnvironBuilder
# only works with the whole request buffered in memory.
data = request.get_data()
if data and not isinstance(data, str):
data = ''.join(list(data))
builder = EnvironBuilder(
path=request.get_selector(),
base_url='%s://%s' % (request.get_type(), request.get_host()),
method=request.get_method(),
data=data,
headers=request.header_items(),
environ_base=environ_base)
with app.request_context(builder.get_environ()):
response = make_response(app.dispatch_request())
LOGGER.info('"%s" %s via internal routing',
request.get_selector(), response.status_code)
item.status_code = response.status_code
item.content_type = response.mimetype
if item.result_path:
# TODO: Is there a better way to access the response stream?
with open(item.result_path, 'wb') as result_file:
for piece in response.iter_encoded():
result_file.write(piece)
else:
item.data = response.get_data()
return item | [
"Fetches the given request by using the local Flask context."
]
|
Please provide a description of the function:def fetch_normal(item, request):
try:
conn = urllib2.urlopen(request, timeout=item.timeout_seconds)
except urllib2.HTTPError, e:
conn = e
except (urllib2.URLError, ssl.SSLError), e:
# TODO: Make this status more clear
item.status_code = 400
return item
try:
item.status_code = conn.getcode()
item.content_type = conn.info().gettype()
if item.result_path:
with open(item.result_path, 'wb') as result_file:
shutil.copyfileobj(conn, result_file)
else:
item.data = conn.read()
except socket.timeout, e:
# TODO: Make this status more clear
item.status_code = 400
return item
finally:
conn.close()
return item | [
"Fetches the given request over HTTP."
]
|
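A minimal sketch of calling fetch_normal directly, using a stand-in object in place of the FetchItem work item defined elsewhere in this module; the attribute names below mirror only what the function reads and writes, and the real class has more fields:

import urllib2

class FakeFetchItem(object):
    timeout_seconds = 30
    result_path = None   # set to a file path to stream the body to disk
    status_code = None
    content_type = None
    data = None

item = fetch_normal(FakeFetchItem(), urllib2.Request('http://example.com/'))
print item.status_code, item.content_type, len(item.data or '')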
Please provide a description of the function:def register(coordinator):
fetch_queue = Queue.Queue()
coordinator.register(FetchItem, fetch_queue)
for i in xrange(FLAGS.fetch_threads):
coordinator.worker_threads.append(
FetchThread(fetch_queue, coordinator.input_queue)) | [
"Registers this module as a worker with the given coordinator."
]
|
Please provide a description of the function:def json(self):
if self._data_json:
return self._data_json
if not self.data or self.content_type != 'application/json':
return None
self._data_json = json.loads(self.data)
return self._data_json | [
"Returns de-JSONed data or None if it's a different content type."
]
|
Please provide a description of the function:def maybe_imgur(self, path):
'''Uploads a file to imgur if requested via command line flags.
Returns either "path" or "path url" depending on the course of action.
'''
if not FLAGS.imgur_client_id:
return path
im = pyimgur.Imgur(FLAGS.imgur_client_id)
uploaded_image = im.upload_image(path)
return '%s %s' % (path, uploaded_image.link) | []
|
Please provide a description of the function:def real_main(release_url=None,
tests_json_path=None,
upload_build_id=None,
upload_release_name=None):
coordinator = workers.get_coordinator()
fetch_worker.register(coordinator)
coordinator.start()
data = open(FLAGS.tests_json_path).read()
tests = load_tests(data)
item = DiffMyImages(
release_url,
tests,
upload_build_id,
upload_release_name,
heartbeat=workers.PrintWorkflow)
item.root = True
coordinator.input_queue.put(item)
coordinator.wait_one()
coordinator.stop()
coordinator.join() | [
"Runs diff_my_images."
]
|
Please provide a description of the function:def clean_url(url, force_scheme=None):
# URL should be ASCII according to RFC 3986
url = str(url)
# Collapse ../../ and related
url_parts = urlparse.urlparse(url)
path_parts = []
for part in url_parts.path.split('/'):
if part == '.':
continue
elif part == '..':
if path_parts:
path_parts.pop()
else:
path_parts.append(part)
url_parts = list(url_parts)
if force_scheme:
url_parts[0] = force_scheme
url_parts[2] = '/'.join(path_parts)
if FLAGS.keep_query_string == False:
url_parts[4] = '' # No query string
url_parts[5] = '' # No fragment
# Always have a trailing slash
if not url_parts[2]:
url_parts[2] = '/'
return urlparse.urlunparse(url_parts) | [
"Cleans the given URL."
]
|
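A few illustrative calls inferred from the code above, assuming FLAGS.keep_query_string is False; this is not a documented contract, just the behavior the branches imply:

clean_url('http://example.com/a/b/../c')   # -> 'http://example.com/a/c'
clean_url('http://example.com/?q=1#frag')  # -> 'http://example.com/'
clean_url('http://example.com/page', force_scheme='https')  # -> 'https://example.com/page'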
Please provide a description of the function:def extract_urls(url, data, unescape=HTMLParser.HTMLParser().unescape):
parts = urlparse.urlparse(url)
prefix = '%s://%s' % (parts.scheme, parts.netloc)
accessed_dir = os.path.dirname(parts.path)
if not accessed_dir.endswith('/'):
accessed_dir += '/'
for pattern, replacement in REPLACEMENT_REGEXES:
fixed = replacement % {
'base': prefix,
'accessed_dir': accessed_dir,
}
data = re.sub(pattern, fixed, data)
result = set()
for match in re.finditer(MAYBE_HTML_URL_REGEX, data):
found_url = unescape(match.groupdict()['absurl'])
found_url = clean_url(
found_url,
force_scheme=parts[0]) # Use the main page's scheme
result.add(found_url)
return result | [
"Extracts the URLs from an HTML document."
]
|
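A rough usage sketch; the exact output depends on the module-level REPLACEMENT_REGEXES and MAYBE_HTML_URL_REGEX constants, which are not shown here, so the result below is illustrative only:

html = '<a href="/docs/index.html">Docs</a> <img src="logo.png">'
found = extract_urls('http://example.com/blog/post', html)
# Relative references are rewritten against the page's scheme, host, and
# directory, so 'found' would contain absolute URLs along the lines of:
# set(['http://example.com/docs/index.html',
#      'http://example.com/blog/logo.png'])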
Please provide a description of the function:def prune_urls(url_set, start_url, allowed_list, ignored_list):
result = set()
for url in url_set:
allowed = False
for allow_url in allowed_list:
if url.startswith(allow_url):
allowed = True
break
if not allowed:
continue
ignored = False
for ignore_url in ignored_list:
if url.startswith(ignore_url):
ignored = True
break
if ignored:
continue
prefix, suffix = (url.rsplit('.', 1) + [''])[:2]
if suffix.lower() in IGNORE_SUFFIXES:
continue
result.add(url)
return result | [
"Prunes URLs that should be ignored."
]
|
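A small sketch of the pruning rules, assuming the module-level IGNORE_SUFFIXES set contains common binary extensions such as 'png':

urls = set([
    'http://example.com/page',        # kept: under an allowed prefix
    'http://example.com/admin/page',  # dropped: under an ignored prefix
    'http://example.com/logo.png',    # dropped: ignored file suffix
    'http://other.com/page',          # dropped: not under an allowed prefix
])
kept = prune_urls(urls,
                  start_url='http://example.com/',
                  allowed_list=['http://example.com/'],
                  ignored_list=['http://example.com/admin'])
# kept == set(['http://example.com/page'])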
Please provide a description of the function:def real_main(start_url=None,
ignore_prefixes=None,
upload_build_id=None,
upload_release_name=None):
coordinator = workers.get_coordinator()
fetch_worker.register(coordinator)
coordinator.start()
item = SiteDiff(
start_url=start_url,
ignore_prefixes=ignore_prefixes,
upload_build_id=upload_build_id,
upload_release_name=upload_release_name,
heartbeat=workers.PrintWorkflow)
item.root = True
coordinator.input_queue.put(item)
coordinator.wait_one()
coordinator.stop()
coordinator.join() | [
"Runs the site_diff."
]
|
Please provide a description of the function:def render_or_send(func, message):
if request.endpoint != func.func_name:
mail.send(message)
if (current_user.is_authenticated() and current_user.superuser):
return render_template('debug_email.html', message=message) | [
"Renders an email message for debugging or actually sends it."
]
|
Please provide a description of the function:def send_ready_for_review(build_id, release_name, release_number):
build = models.Build.query.get(build_id)
if not build.send_email:
logging.debug(
'Not sending ready for review email because build does not have '
'email enabled. build_id=%r', build.id)
return
ops = operations.BuildOps(build_id)
release, run_list, stats_dict, _ = ops.get_release(
release_name, release_number)
if not run_list:
logging.debug(
'Not sending ready for review email because there are '
' no runs. build_id=%r, release_name=%r, release_number=%d',
build.id, release.name, release.number)
return
title = '%s: %s - Ready for review' % (build.name, release.name)
email_body = render_template(
'email_ready_for_review.html',
build=build,
release=release,
run_list=run_list,
stats_dict=stats_dict)
recipients = []
if build.email_alias:
recipients.append(build.email_alias)
else:
for user in build.owners:
recipients.append(user.email_address)
if not recipients:
logging.debug(
'Not sending ready for review email because there are no '
'recipients. build_id=%r, release_name=%r, release_number=%d',
build.id, release.name, release.number)
return
message = Message(title, recipients=recipients)
message.html = email_body
logging.info('Sending ready for review email for build_id=%r, '
'release_name=%r, release_number=%d to %r',
build.id, release.name, release.number, recipients)
return render_or_send(send_ready_for_review, message) | [
"Sends an email indicating that the release is ready for review."
]
|
Please provide a description of the function:def homepage():
if current_user.is_authenticated():
if not login_fresh():
logging.debug('User needs a fresh token')
abort(login.needs_refresh())
auth.claim_invitations(current_user)
build_list = operations.UserOps(current_user.get_id()).get_builds()
return render_template(
'home.html',
build_list=build_list,
show_video_and_promo_text=app.config['SHOW_VIDEO_AND_PROMO_TEXT']) | [
"Renders the homepage."
]
|
Please provide a description of the function:def new_build():
form = forms.BuildForm()
if form.validate_on_submit():
build = models.Build()
form.populate_obj(build)
build.owners.append(current_user)
db.session.add(build)
db.session.flush()
auth.save_admin_log(build, created_build=True, message=build.name)
db.session.commit()
operations.UserOps(current_user.get_id()).evict()
logging.info('Created build via UI: build_id=%r, name=%r',
build.id, build.name)
return redirect(url_for('view_build', id=build.id))
return render_template(
'new_build.html',
build_form=form) | [
"Page for crediting or editing a build."
]
|
Please provide a description of the function:def view_build():
build = g.build
page_size = min(request.args.get('page_size', 10, type=int), 50)
offset = request.args.get('offset', 0, type=int)
ops = operations.BuildOps(build.id)
has_next_page, candidate_list, stats_counts = ops.get_candidates(
page_size, offset)
# Collate by release name, order releases by latest creation. Init stats.
release_dict = {}
created_dict = {}
run_stats_dict = {}
for candidate in candidate_list:
release_list = release_dict.setdefault(candidate.name, [])
release_list.append(candidate)
max_created = created_dict.get(candidate.name, candidate.created)
created_dict[candidate.name] = max(candidate.created, max_created)
run_stats_dict[candidate.id] = dict(
runs_total=0,
runs_complete=0,
runs_successful=0,
runs_failed=0,
runs_baseline=0,
runs_pending=0)
# Sort each release by candidate number descending
for release_list in release_dict.itervalues():
release_list.sort(key=lambda x: x.number, reverse=True)
# Sort all releases by created time descending
release_age_list = [
(value, key) for key, value in created_dict.iteritems()]
release_age_list.sort(reverse=True)
release_name_list = [key for _, key in release_age_list]
# Count totals for each run state within that release.
for candidate_id, status, count in stats_counts:
stats_dict = run_stats_dict[candidate_id]
for key in ops.get_stats_keys(status):
stats_dict[key] += count
return render_template(
'view_build.html',
build=build,
release_name_list=release_name_list,
release_dict=release_dict,
run_stats_dict=run_stats_dict,
has_next_page=has_next_page,
current_offset=offset,
next_offset=offset + page_size,
last_offset=max(0, offset - page_size),
page_size=page_size) | [
"Page for viewing all releases in a build."
]
|
Please provide a description of the function:def view_release():
build = g.build
if request.method == 'POST':
form = forms.ReleaseForm(request.form)
else:
form = forms.ReleaseForm(request.args)
form.validate()
ops = operations.BuildOps(build.id)
release, run_list, stats_dict, approval_log = ops.get_release(
form.name.data, form.number.data)
if not release:
abort(404)
if request.method == 'POST':
decision_states = (
models.Release.REVIEWING,
models.Release.RECEIVING,
models.Release.PROCESSING)
if form.good.data and release.status in decision_states:
release.status = models.Release.GOOD
auth.save_admin_log(build, release_good=True, release=release)
elif form.bad.data and release.status in decision_states:
release.status = models.Release.BAD
auth.save_admin_log(build, release_bad=True, release=release)
elif form.reviewing.data and release.status in (
models.Release.GOOD, models.Release.BAD):
release.status = models.Release.REVIEWING
auth.save_admin_log(build, release_reviewing=True, release=release)
else:
logging.warning(
'Bad state transition for name=%r, number=%r, form=%r',
release.name, release.number, form.data)
abort(400)
db.session.add(release)
db.session.commit()
ops.evict()
return redirect(url_for(
'view_release',
id=build.id,
name=release.name,
number=release.number))
# Update form values for rendering
form.good.data = True
form.bad.data = True
form.reviewing.data = True
return render_template(
'view_release.html',
build=build,
release=release,
run_list=run_list,
release_form=form,
approval_log=approval_log,
stats_dict=stats_dict) | [
"Page for viewing all tests runs in a release."
]
|
Please provide a description of the function:def _get_artifact_context(run, file_type):
sha1sum = None
image_file = False
log_file = False
config_file = False
if request.path == '/image':
image_file = True
if file_type == 'before':
sha1sum = run.ref_image
elif file_type == 'diff':
sha1sum = run.diff_image
elif file_type == 'after':
sha1sum = run.image
else:
abort(400)
elif request.path == '/log':
log_file = True
if file_type == 'before':
sha1sum = run.ref_log
elif file_type == 'diff':
sha1sum = run.diff_log
elif file_type == 'after':
sha1sum = run.log
else:
abort(400)
elif request.path == '/config':
config_file = True
if file_type == 'before':
sha1sum = run.ref_config
elif file_type == 'after':
sha1sum = run.config
else:
abort(400)
return image_file, log_file, config_file, sha1sum | [
"Gets the artifact details for the given run and file_type."
]
|
Please provide a description of the function:def view_run():
build = g.build
if request.method == 'POST':
form = forms.RunForm(request.form)
else:
form = forms.RunForm(request.args)
form.validate()
ops = operations.BuildOps(build.id)
run, next_run, previous_run, approval_log = ops.get_run(
form.name.data, form.number.data, form.test.data)
if not run:
abort(404)
file_type = form.type.data
image_file, log_file, config_file, sha1sum = (
_get_artifact_context(run, file_type))
if request.method == 'POST':
if form.approve.data and run.status == models.Run.DIFF_FOUND:
run.status = models.Run.DIFF_APPROVED
auth.save_admin_log(build, run_approved=True, run=run)
elif form.disapprove.data and run.status == models.Run.DIFF_APPROVED:
run.status = models.Run.DIFF_FOUND
auth.save_admin_log(build, run_rejected=True, run=run)
else:
abort(400)
db.session.add(run)
db.session.commit()
ops.evict()
return redirect(url_for(
request.endpoint,
id=build.id,
name=run.release.name,
number=run.release.number,
test=run.name,
type=file_type))
# Update form values for rendering
form.approve.data = True
form.disapprove.data = True
context = dict(
build=build,
release=run.release,
run=run,
run_form=form,
previous_run=previous_run,
next_run=next_run,
file_type=file_type,
image_file=image_file,
log_file=log_file,
config_file=config_file,
sha1sum=sha1sum,
approval_log=approval_log)
if file_type:
template_name = 'view_artifact.html'
else:
template_name = 'view_run.html'
response = flask.Response(render_template(template_name, **context))
return response | [
"Page for viewing before/after for a specific test run."
]
|
Please provide a description of the function:def register(coordinator):
timer_queue = Queue.Queue()
coordinator.register(TimerItem, timer_queue)
coordinator.worker_threads.append(
TimerThread(timer_queue, coordinator.input_queue)) | [
"Registers this module as a worker with the given coordinator."
]
|
Please provide a description of the function:def get_coordinator():
workflow_queue = Queue.Queue()
complete_queue = Queue.Queue()
coordinator = WorkflowThread(workflow_queue, complete_queue)
coordinator.register(WorkflowItem, workflow_queue)
return coordinator | [
"Creates a coordinator and returns it."
]
|
Please provide a description of the function:def _print_repr(self, depth):
if depth <= 0:
return '%s.%s#%d' % (
self.__class__.__module__,
self.__class__.__name__,
id(self))
return '%s.%s(%s)#%d' % (
self.__class__.__module__,
self.__class__.__name__,
self._print_tree(self._get_dict_for_repr(), depth - 1),
id(self)) | [
"Print this WorkItem to the given stack depth.\n\n The depth parameter ensures that we can print WorkItems in\n arbitrarily long chains without hitting the max stack depth.\n This can happen with WaitForUrlWorkflowItems, which\n create long chains of small waits.\n "
]
|
Please provide a description of the function:def error(self):
# Copy the error from any failed item to be the error for the whole
# barrier. The first error seen "wins". Also handles the case where
# the WorkItems passed into the barrier have already completed and
# been marked with errors.
for item in self:
if isinstance(item, WorkItem) and item.error:
return item.error
return None | [
"Returns the error for this barrier and all work items, if any."
]
|
Please provide a description of the function:def outstanding(self):
# Allow the same WorkItem to be yielded multiple times but not
# count towards blocking the barrier.
done_count = 0
for item in self:
if not self.wait_any and item.fire_and_forget:
# Only count fire_and_forget items as done if this is
# *not* a WaitAny barrier. We only want to return control
# to the caller when at least one of the blocking items
# has completed.
done_count += 1
elif item.done:
done_count += 1
if self.wait_any and done_count > 0:
return False
if done_count == len(self):
return False
return True | [
"Returns whether or not this barrier has pending work."
]
|
Please provide a description of the function:def get_item(self):
if self.was_list:
result = ResultList()
for item in self:
if isinstance(item, WorkflowItem):
if item.done and not item.error:
result.append(item.result)
else:
# When there's an error or the workflow isn't done yet,
# just return the original WorkflowItem so the caller
# can inspect its entire state.
result.append(item)
else:
result.append(item)
return result
else:
return self[0] | [
"Returns the item to send back into the workflow generator."
]
|
Please provide a description of the function:def start(self):
assert not self.interrupted
for thread in self.worker_threads:
thread.start()
WorkerThread.start(self) | [
"Starts the coordinator thread and all related worker threads."
]
|
Please provide a description of the function:def stop(self):
if self.interrupted:
return
for thread in self.worker_threads:
thread.interrupted = True
self.interrupted = True | [
"Stops the coordinator thread and all related threads."
]
|
Please provide a description of the function:def join(self):
for thread in self.worker_threads:
thread.join()
WorkerThread.join(self) | [
"Joins the coordinator thread and all worker threads."
]
|
Please provide a description of the function:def wait_one(self):
while True:
try:
item = self.output_queue.get(True, self.polltime)
except Queue.Empty:
continue
except KeyboardInterrupt:
LOGGER.debug('Exiting')
return
else:
item.check_result()
return | [
"Waits until this worker has finished one work item or died."
]
|
Please provide a description of the function:def superuser_required(f):
@functools.wraps(f)
@login_required
def wrapped(*args, **kwargs):
if not (current_user.is_authenticated() and current_user.superuser):
abort(403)
return f(*args, **kwargs)
return wrapped | [
"Requires the requestor to be a super user."
]
|
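Typical use is as a Flask view decorator layered over the routing decorator (login_required is already applied inside the wrapper); the route below is hypothetical:

@app.route('/admin/tools')
@superuser_required
def admin_tools():
    return render_template('admin_tools.html')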
Please provide a description of the function:def can_user_access_build(param_name):
build_id = (
request.args.get(param_name, type=int) or
request.form.get(param_name, type=int) or
request.json[param_name])
if not build_id:
logging.debug('Build ID in param_name=%r was missing', param_name)
abort(400)
ops = operations.UserOps(current_user.get_id())
build, user_is_owner = ops.owns_build(build_id)
if not build:
logging.debug('Could not find build_id=%r', build_id)
abort(404)
if current_user.is_authenticated() and not user_is_owner:
# Assume the user should be able to access the build but can't because
# the cache is out of date. This forces the cache to repopulate and any
# outstanding user invitations to be completed, hopefully resulting in
# the user having access to the build.
ops.evict()
claim_invitations(current_user)
build, user_is_owner = ops.owns_build(build_id)
if not user_is_owner:
if current_user.is_authenticated() and current_user.superuser:
pass
elif request.method != 'GET':
logging.debug('No way to log in user via modifying request')
abort(403)
elif build.public:
pass
elif current_user.is_authenticated():
logging.debug('User does not have access to this build')
abort(flask.Response('You cannot access this build', 403))
else:
logging.debug('Redirecting user to login to get build access')
abort(login.unauthorized())
elif not login_fresh():
logging.debug('User login is old; forcing refresh')
abort(login.needs_refresh())
return build | [
"Determines if the current user can access the build ID in the request.\n\n Args:\n param_name: Parameter name to use for getting the build ID from the\n request. Will fetch from GET or POST requests.\n\n Returns:\n The build the user has access to.\n "
]
|
Please provide a description of the function:def build_access_required(function_or_param_name):
def get_wrapper(param_name, f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
g.build = can_user_access_build(param_name)
if not utils.is_production():
# Insert a sleep to emulate page loading in production.
time.sleep(0.5)
return f(*args, **kwargs)
return wrapped
if isinstance(function_or_param_name, basestring):
return lambda f: get_wrapper(function_or_param_name, f)
else:
return get_wrapper('id', function_or_param_name) | [
"Decorator ensures user has access to the build ID in the request.\n\n May be used in two ways:\n\n @build_access_required\n def my_func(build):\n ...\n\n @build_access_required('custom_build_id_param')\n def my_func(build):\n ...\n\n Always calls the given function with the models.Build entity as the\n first positional argument.\n "
]
|
Please provide a description of the function:def _get_api_key_ops():
auth_header = request.authorization
if not auth_header:
logging.debug('API request lacks authorization header')
abort(flask.Response(
'API key required', 401,
{'WWW-Authenticate': 'Basic realm="API key required"'}))
return operations.ApiKeyOps(auth_header.username, auth_header.password) | [
"Gets the operations.ApiKeyOps instance for the current request."
]
|
Please provide a description of the function:def current_api_key():
if app.config.get('IGNORE_AUTH'):
return models.ApiKey(
id='anonymous_superuser',
secret='',
superuser=True)
ops = _get_api_key_ops()
api_key = ops.get()
logging.debug('Authenticated as API key=%r', api_key.id)
return api_key | [
"Determines the API key for the current request.\n\n Returns:\n The ApiKey instance.\n "
]
|
Please provide a description of the function:def can_api_key_access_build(param_name):
build_id = (
request.args.get(param_name, type=int) or
request.form.get(param_name, type=int) or
request.json[param_name])
utils.jsonify_assert(build_id, 'build_id required')
if app.config.get('IGNORE_AUTH'):
api_key = models.ApiKey(
id='anonymous_superuser',
secret='',
superuser=True)
build = models.Build.query.get(build_id)
utils.jsonify_assert(build is not None, 'build must exist', 404)
else:
ops = _get_api_key_ops()
api_key, build = ops.can_access_build(build_id)
return api_key, build | [
"Determines if the current API key can access the build in the request.\n\n Args:\n param_name: Parameter name to use for getting the build ID from the\n request. Will fetch from GET or POST requests.\n\n Returns:\n (api_key, build) The API Key and the Build it has access to.\n "
]
|
Please provide a description of the function:def build_api_access_required(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
g.api_key, g.build = can_api_key_access_build('build_id')
return f(*args, **kwargs)
return wrapped | [
"Decorator ensures API key has access to the build ID in the request.\n\n Always calls the given function with the models.Build entity as the\n first positional argument.\n "
]
|
Please provide a description of the function:def superuser_api_key_required(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
api_key = current_api_key()
g.api_key = api_key
utils.jsonify_assert(
api_key.superuser,
'API key=%r must be a super user' % api_key.id,
403)
return f(*args, **kwargs)
return wrapped | [
"Decorator ensures only superuser API keys can request this function."
]
|
Please provide a description of the function:def manage_api_keys():
build = g.build
create_form = forms.CreateApiKeyForm()
if create_form.validate_on_submit():
api_key = models.ApiKey()
create_form.populate_obj(api_key)
api_key.id = utils.human_uuid()
api_key.secret = utils.password_uuid()
save_admin_log(build, created_api_key=True, message=api_key.id)
db.session.add(api_key)
db.session.commit()
logging.info('Created API key=%r for build_id=%r',
api_key.id, build.id)
return redirect(url_for('manage_api_keys', build_id=build.id))
create_form.build_id.data = build.id
api_key_query = (
models.ApiKey.query
.filter_by(build_id=build.id)
.order_by(models.ApiKey.created.desc())
.limit(1000))
revoke_form_list = []
for api_key in api_key_query:
form = forms.RevokeApiKeyForm()
form.id.data = api_key.id
form.build_id.data = build.id
form.revoke.data = True
revoke_form_list.append((api_key, form))
return render_template(
'view_api_keys.html',
build=build,
create_form=create_form,
revoke_form_list=revoke_form_list) | [
"Page for viewing and creating API keys."
]
|
Please provide a description of the function:def revoke_api_key():
build = g.build
form = forms.RevokeApiKeyForm()
if form.validate_on_submit():
api_key = models.ApiKey.query.get(form.id.data)
if api_key.build_id != build.id:
logging.debug('User does not have access to API key=%r',
api_key.id)
abort(403)
api_key.active = False
save_admin_log(build, revoked_api_key=True, message=api_key.id)
db.session.add(api_key)
db.session.commit()
ops = operations.ApiKeyOps(api_key.id, api_key.secret)
ops.evict()
return redirect(url_for('manage_api_keys', build_id=build.id)) | [
"Form submission handler for revoking API keys."
]
|
Please provide a description of the function:def claim_invitations(user):
# See if there are any build invitations present for the user with this
# email address. If so, replace all those invitations with the real user.
invitation_user_id = '%s:%s' % (
models.User.EMAIL_INVITATION, user.email_address)
invitation_user = models.User.query.get(invitation_user_id)
if invitation_user:
invited_build_list = list(invitation_user.builds)
if not invited_build_list:
return
db.session.add(user)
logging.debug('Found %d build admin invitations for id=%r, user=%r',
len(invited_build_list), invitation_user_id, user)
for build in invited_build_list:
build.owners.remove(invitation_user)
if not build.is_owned_by(user.id):
build.owners.append(user)
logging.debug('Claiming invitation for build_id=%r', build.id)
save_admin_log(build, invite_accepted=True)
else:
logging.debug('User already owner of build. '
'id=%r, build_id=%r', user.id, build.id)
db.session.add(build)
db.session.delete(invitation_user)
db.session.commit()
# Re-add the user to the current session so we can query with it.
db.session.add(current_user) | [
"Claims any pending invitations for the given user's email address."
]
|
Please provide a description of the function:def manage_admins():
build = g.build
# Do not show cached data
db.session.add(build)
db.session.refresh(build)
add_form = forms.AddAdminForm()
if add_form.validate_on_submit():
invitation_user_id = '%s:%s' % (
models.User.EMAIL_INVITATION, add_form.email_address.data)
invitation_user = models.User.query.get(invitation_user_id)
if not invitation_user:
invitation_user = models.User(
id=invitation_user_id,
email_address=add_form.email_address.data)
db.session.add(invitation_user)
db.session.add(build)
db.session.add(invitation_user)
db.session.refresh(build, lockmode='update')
build.owners.append(invitation_user)
save_admin_log(build, invited_new_admin=True,
message=invitation_user.email_address)
db.session.commit()
logging.info('Added user=%r as owner to build_id=%r',
invitation_user.id, build.id)
return redirect(url_for('manage_admins', build_id=build.id))
add_form.build_id.data = build.id
revoke_form_list = []
for user in build.owners:
form = forms.RemoveAdminForm()
form.user_id.data = user.id
form.build_id.data = build.id
form.revoke.data = True
revoke_form_list.append((user, form))
return render_template(
'view_admins.html',
build=build,
add_form=add_form,
revoke_form_list=revoke_form_list) | [
"Page for viewing and managing build admins."
]
|
Please provide a description of the function:def revoke_admin():
build = g.build
form = forms.RemoveAdminForm()
if form.validate_on_submit():
user = models.User.query.get(form.user_id.data)
if not user:
logging.debug('User being revoked admin access does not exist. '
'id=%r, build_id=%r', form.user_id.data, build.id)
abort(400)
if user == current_user:
logging.debug('User trying to remove themself as admin. '
'id=%r, build_id=%r', user.id, build.id)
abort(400)
db.session.add(build)
db.session.add(user)
db.session.refresh(build, lockmode='update')
db.session.refresh(user, lockmode='update')
user_is_owner = build.owners.filter_by(id=user.id)
if not user_is_owner:
logging.debug('User being revoked admin access is not owner. '
'id=%r, build_id=%r.', user.id, build.id)
abort(400)
build.owners.remove(user)
save_admin_log(build, revoked_admin=True, message=user.email_address)
db.session.commit()
operations.UserOps(user.get_id()).evict()
return redirect(url_for('manage_admins', build_id=build.id)) | [
"Form submission handler for revoking admin access to a build."
]
|
Please provide a description of the function:def save_admin_log(build, **kwargs):
message = kwargs.pop('message', None)
release = kwargs.pop('release', None)
run = kwargs.pop('run', None)
if not len(kwargs) == 1:
raise TypeError('Must specify a LOG_TYPE argument')
log_enum = kwargs.keys()[0]
log_type = getattr(models.AdminLog, log_enum.upper(), None)
if not log_type:
raise TypeError('Bad log_type argument: %s' % log_enum)
if current_user.is_anonymous():
user_id = None
else:
user_id = current_user.get_id()
log = models.AdminLog(
build_id=build.id,
log_type=log_type,
message=message,
user_id=user_id)
if release:
log.release_id = release.id
if run:
log.run_id = run.id
log.release_id = run.release_id
db.session.add(log) | [
"Saves an action to the admin log."
]
|
Please provide a description of the function:def view_admin_log():
build = g.build
# TODO: Add paging
log_list = (
models.AdminLog.query
.filter_by(build_id=build.id)
.order_by(models.AdminLog.created.desc())
.all())
return render_template(
'view_admin_log.html',
build=build,
log_list=log_list) | [
"Page for viewing the log of admin activity."
]
|
Please provide a description of the function:def verify_binary(flag_name, process_args=None):
if process_args is None:
process_args = []
path = getattr(FLAGS, flag_name)
if not path:
logging.error('Flag %r not set' % flag_name)
sys.exit(1)
with open(os.devnull, 'w') as dev_null:
try:
subprocess.check_call(
[path] + process_args,
stdout=dev_null,
stderr=subprocess.STDOUT)
except:
logging.exception('--%s binary at path %r does not work',
flag_name, path)
sys.exit(1) | [
"Exits the program if the binary from the given flag doesn't run.\n\n Args:\n flag_name: Name of the flag that should be the path to the binary.\n process_args: Args to pass to the binary to do nothing but verify\n that it's working correctly (something like \"--version\") is good.\n Optional. Defaults to no args.\n\n Raises:\n SystemExit with error if the process did not work.\n "
]
|
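Worker modules call this at registration time so a misconfigured or missing external tool aborts startup instead of failing mid-run, as in the capture and pdiff register() functions shown elsewhere in this file; for example:

# Exits the whole process with an error if the configured binary can't run.
utils.verify_binary('capture_binary', ['--version'])
utils.verify_binary('pdiff_compare_binary', ['-version'])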
Please provide a description of the function:def create_release():
build = g.build
release_name = request.form.get('release_name')
utils.jsonify_assert(release_name, 'release_name required')
url = request.form.get('url')
utils.jsonify_assert(url, 'url required')
release = models.Release(
name=release_name,
url=url,
number=1,
build_id=build.id)
last_candidate = (
models.Release.query
.filter_by(build_id=build.id, name=release_name)
.order_by(models.Release.number.desc())
.first())
if last_candidate:
release.number += last_candidate.number
if last_candidate.status == models.Release.PROCESSING:
canceled_task_count = work_queue.cancel(
release_id=last_candidate.id)
logging.info('Canceling %d tasks for previous attempt '
'build_id=%r, release_name=%r, release_number=%d',
canceled_task_count, build.id, last_candidate.name,
last_candidate.number)
last_candidate.status = models.Release.BAD
db.session.add(last_candidate)
db.session.add(release)
db.session.commit()
signals.release_updated_via_api.send(app, build=build, release=release)
logging.info('Created release: build_id=%r, release_name=%r, url=%r, '
'release_number=%d', build.id, release.name,
url, release.number)
return flask.jsonify(
success=True,
build_id=build.id,
release_name=release.name,
release_number=release.number,
url=url) | [
"Creates a new release candidate for a build."
]
|
Please provide a description of the function:def _check_release_done_processing(release):
if release.status != models.Release.PROCESSING:
# NOTE: This statement also guards for situations where the user has
# prematurely specified that the release is good or bad. Once the user
# has done that, the system will not automatically move the release
# back into the 'reviewing' state or send the email notification below.
logging.info('Release not in processing state yet: build_id=%r, '
'name=%r, number=%d', release.build_id, release.name,
release.number)
return False
query = models.Run.query.filter_by(release_id=release.id)
for run in query:
if run.status == models.Run.NEEDS_DIFF:
# Still waiting for the diff to finish.
return False
if run.ref_config and not run.ref_image:
# Still waiting for the ref capture to process.
return False
if run.config and not run.image:
# Still waiting for the run capture to process.
return False
logging.info('Release done processing, now reviewing: build_id=%r, '
'name=%r, number=%d', release.build_id, release.name,
release.number)
# Send the email at the end of this request so we know it's only
# sent a single time (guarded by the release.status check above).
build_id = release.build_id
release_name = release.name
release_number = release.number
@utils.after_this_request
def send_notification_email(response):
emails.send_ready_for_review(build_id, release_name, release_number)
release.status = models.Release.REVIEWING
db.session.add(release)
return True | [
"Moves a release candidate to reviewing if all runs are done."
]
|
Please provide a description of the function:def _get_release_params():
release_name = request.form.get('release_name')
utils.jsonify_assert(release_name, 'release_name required')
release_number = request.form.get('release_number', type=int)
utils.jsonify_assert(release_number is not None, 'release_number required')
return release_name, release_number | [
"Gets the release params from the current request."
]
|
Please provide a description of the function:def _find_last_good_run(build):
run_name = request.form.get('run_name', type=str)
utils.jsonify_assert(run_name, 'run_name required')
last_good_release = (
models.Release.query
.filter_by(
build_id=build.id,
status=models.Release.GOOD)
.order_by(models.Release.created.desc())
.first())
last_good_run = None
if last_good_release:
logging.debug('Found last good release for: build_id=%r, '
'release_name=%r, release_number=%d',
build.id, last_good_release.name,
last_good_release.number)
last_good_run = (
models.Run.query
.filter_by(release_id=last_good_release.id, name=run_name)
.first())
if last_good_run:
logging.debug('Found last good run for: build_id=%r, '
'release_name=%r, release_number=%d, '
'run_name=%r',
build.id, last_good_release.name,
last_good_release.number, last_good_run.name)
return last_good_release, last_good_run | [
"Finds the last good release and run for a build."
]
|
Please provide a description of the function:def find_run():
build = g.build
last_good_release, last_good_run = _find_last_good_run(build)
if last_good_run:
return flask.jsonify(
success=True,
build_id=build.id,
release_name=last_good_release.name,
release_number=last_good_release.number,
run_name=last_good_run.name,
url=last_good_run.url,
image=last_good_run.image,
log=last_good_run.log,
config=last_good_run.config)
return utils.jsonify_error('Run not found') | [
"Finds the last good run of the given name for a release."
]
|
Please provide a description of the function:def _get_or_create_run(build):
release_name, release_number = _get_release_params()
run_name = request.form.get('run_name', type=str)
utils.jsonify_assert(run_name, 'run_name required')
release = (
models.Release.query
.filter_by(build_id=build.id, name=release_name, number=release_number)
.first())
utils.jsonify_assert(release, 'release does not exist')
run = (
models.Run.query
.filter_by(release_id=release.id, name=run_name)
.first())
if not run:
# Ignore re-reports of the same run name for this release.
logging.info('Created run: build_id=%r, release_name=%r, '
'release_number=%d, run_name=%r',
build.id, release.name, release.number, run_name)
run = models.Run(
release_id=release.id,
name=run_name,
status=models.Run.DATA_PENDING)
db.session.add(run)
db.session.flush()
return release, run | [
"Gets a run for a build or creates it if it does not exist."
]
|
Please provide a description of the function:def _enqueue_capture(build, release, run, url, config_data, baseline=False):
# Validate the JSON config parses.
try:
config_dict = json.loads(config_data)
except Exception, e:
abort(utils.jsonify_error(e))
# Rewrite the config JSON to include the URL specified in this request.
# Blindly overwrite anything that was there.
config_dict['targetUrl'] = url
config_data = json.dumps(config_dict)
config_artifact = _save_artifact(build, config_data, 'application/json')
db.session.add(config_artifact)
db.session.flush()
suffix = ''
if baseline:
suffix = ':baseline'
task_id = '%s:%s%s' % (run.id, hashlib.sha1(url).hexdigest(), suffix)
logging.info('Enqueueing capture task=%r, baseline=%r', task_id, baseline)
work_queue.add(
constants.CAPTURE_QUEUE_NAME,
payload=dict(
build_id=build.id,
release_name=release.name,
release_number=release.number,
run_name=run.name,
url=url,
config_sha1sum=config_artifact.id,
baseline=baseline,
),
build_id=build.id,
release_id=release.id,
run_id=run.id,
source='request_run',
task_id=task_id)
# Set the URL and config early to indicate to report_run that there is
# still data pending even if 'image' and 'ref_image' are unset.
if baseline:
run.ref_url = url
run.ref_config = config_artifact.id
else:
run.url = url
run.config = config_artifact.id | [
"Enqueues a task to run a capture process."
]
|
Please provide a description of the function:def request_run():
build = g.build
current_release, current_run = _get_or_create_run(build)
current_url = request.form.get('url', type=str)
config_data = request.form.get('config', default='{}', type=str)
utils.jsonify_assert(current_url, 'url to capture required')
utils.jsonify_assert(config_data, 'config document required')
config_artifact = _enqueue_capture(
build, current_release, current_run, current_url, config_data)
ref_url = request.form.get('ref_url', type=str)
ref_config_data = request.form.get('ref_config', type=str)
utils.jsonify_assert(
bool(ref_url) == bool(ref_config_data),
'ref_url and ref_config must both be specified or not specified')
if ref_url and ref_config_data:
ref_config_artifact = _enqueue_capture(
build, current_release, current_run, ref_url, ref_config_data,
baseline=True)
else:
_, last_good_run = _find_last_good_run(build)
if last_good_run:
current_run.ref_url = last_good_run.url
current_run.ref_image = last_good_run.image
current_run.ref_log = last_good_run.log
current_run.ref_config = last_good_run.config
db.session.add(current_run)
db.session.commit()
signals.run_updated_via_api.send(
app, build=build, release=current_release, run=current_run)
return flask.jsonify(
success=True,
build_id=build.id,
release_name=current_release.name,
release_number=current_release.number,
run_name=current_run.name,
url=current_run.url,
config=current_run.config,
ref_url=current_run.ref_url,
ref_config=current_run.ref_config) | [
"Requests a new run for a release candidate."
]
|
Please provide a description of the function:def report_run():
build = g.build
release, run = _get_or_create_run(build)
db.session.refresh(run, lockmode='update')
current_url = request.form.get('url', type=str)
current_image = request.form.get('image', type=str)
current_log = request.form.get('log', type=str)
current_config = request.form.get('config', type=str)
ref_url = request.form.get('ref_url', type=str)
ref_image = request.form.get('ref_image', type=str)
ref_log = request.form.get('ref_log', type=str)
ref_config = request.form.get('ref_config', type=str)
diff_failed = request.form.get('diff_failed', type=str)
diff_image = request.form.get('diff_image', type=str)
diff_log = request.form.get('diff_log', type=str)
distortion = request.form.get('distortion', default=None, type=float)
run_failed = request.form.get('run_failed', type=str)
if current_url:
run.url = current_url
if current_image:
run.image = current_image
if current_log:
run.log = current_log
if current_config:
run.config = current_config
if current_image or current_log or current_config:
logging.info('Saving run data: build_id=%r, release_name=%r, '
'release_number=%d, run_name=%r, url=%r, '
'image=%r, log=%r, config=%r, run_failed=%r',
build.id, release.name, release.number, run.name,
run.url, run.image, run.log, run.config, run_failed)
if ref_url:
run.ref_url = ref_url
if ref_image:
run.ref_image = ref_image
if ref_log:
run.ref_log = ref_log
if ref_config:
run.ref_config = ref_config
if ref_image or ref_log or ref_config:
logging.info('Saved reference data: build_id=%r, release_name=%r, '
'release_number=%d, run_name=%r, ref_url=%r, '
'ref_image=%r, ref_log=%r, ref_config=%r',
build.id, release.name, release.number, run.name,
run.ref_url, run.ref_image, run.ref_log, run.ref_config)
if diff_image:
run.diff_image = diff_image
if diff_log:
run.diff_log = diff_log
if distortion:
run.distortion = distortion
if diff_image or diff_log:
logging.info('Saved pdiff: build_id=%r, release_name=%r, '
'release_number=%d, run_name=%r, diff_image=%r, '
'diff_log=%r, diff_failed=%r, distortion=%r',
build.id, release.name, release.number, run.name,
run.diff_image, run.diff_log, diff_failed, distortion)
if run.image and run.diff_image:
run.status = models.Run.DIFF_FOUND
elif run.image and run.ref_image and not run.diff_log:
run.status = models.Run.NEEDS_DIFF
elif run.image and run.ref_image and not diff_failed:
run.status = models.Run.DIFF_NOT_FOUND
elif run.image and not run.ref_config:
run.status = models.Run.NO_DIFF_NEEDED
elif run_failed or diff_failed:
run.status = models.Run.FAILED
else:
# NOTE: Intentionally do not transition state here in the default case.
# We allow multiple background workers to be writing to the same Run in
# parallel updating its various properties.
pass
# TODO: Verify the build has access to both the current_image and
# the reference_sha1sum so they can't make a diff from a black image
# and still see private data in the diff image.
if run.status == models.Run.NEEDS_DIFF:
task_id = '%s:%s:%s' % (run.id, run.image, run.ref_image)
logging.info('Enqueuing pdiff task=%r', task_id)
work_queue.add(
constants.PDIFF_QUEUE_NAME,
payload=dict(
build_id=build.id,
release_name=release.name,
release_number=release.number,
run_name=run.name,
run_sha1sum=run.image,
reference_sha1sum=run.ref_image,
),
build_id=build.id,
release_id=release.id,
run_id=run.id,
source='report_run',
task_id=task_id)
# Flush the run so querying for Runs in _check_release_done_processing
# will find the new run too and we won't deadlock.
db.session.add(run)
db.session.flush()
_check_release_done_processing(release)
db.session.commit()
signals.run_updated_via_api.send(
app, build=build, release=release, run=run)
logging.info('Updated run: build_id=%r, release_name=%r, '
'release_number=%d, run_name=%r, status=%r',
build.id, release.name, release.number, run.name, run.status)
return flask.jsonify(success=True) | [
"Reports data for a run for a release candidate."
]
|
Please provide a description of the function:def runs_done():
build = g.build
release_name, release_number = _get_release_params()
release = (
models.Release.query
.filter_by(build_id=build.id, name=release_name, number=release_number)
.with_lockmode('update')
.first())
utils.jsonify_assert(release, 'Release does not exist')
release.status = models.Release.PROCESSING
db.session.add(release)
_check_release_done_processing(release)
db.session.commit()
signals.release_updated_via_api.send(app, build=build, release=release)
logging.info('Runs done for release: build_id=%r, release_name=%r, '
'release_number=%d', build.id, release.name, release.number)
results_url = url_for(
'view_release',
id=build.id,
name=release.name,
number=release.number,
_external=True)
return flask.jsonify(
success=True,
results_url=results_url) | [
"Marks a release candidate as having all runs reported."
]
|
Please provide a description of the function:def _save_artifact(build, data, content_type):
sha1sum = hashlib.sha1(data).hexdigest()
artifact = models.Artifact.query.filter_by(id=sha1sum).first()
if artifact:
logging.debug('Upload already exists: artifact_id=%r', sha1sum)
else:
logging.info('Upload received: artifact_id=%r, content_type=%r',
sha1sum, content_type)
artifact = models.Artifact(
id=sha1sum,
content_type=content_type,
data=data)
_artifact_created(artifact)
artifact.owners.append(build)
return artifact | [
"Saves an artifact to the DB and returns it."
]
|
Please provide a description of the function:def upload():
build = g.build
utils.jsonify_assert(len(request.files) == 1,
'Need exactly one uploaded file')
file_storage = request.files.values()[0]
data = file_storage.read()
content_type, _ = mimetypes.guess_type(file_storage.filename)
artifact = _save_artifact(build, data, content_type)
db.session.add(artifact)
db.session.commit()
return flask.jsonify(
success=True,
build_id=build.id,
sha1sum=artifact.id,
content_type=content_type) | [
"Uploads an artifact referenced by a run."
]
|
Please provide a description of the function:def _get_artifact_response(artifact):
response = flask.Response(
artifact.data,
mimetype=artifact.content_type)
response.cache_control.public = True
response.cache_control.max_age = 8640000
response.set_etag(artifact.id)
return response | [
"Gets the response object for the given artifact.\n\n This method may be overridden in environments that have a different way of\n storing artifact files, such as on-disk or S3.\n "
]
|
Please provide a description of the function:def download():
# Allow users with access to the build to download the file. Falls back
# to API keys with access to the build. Prefer user first for speed.
try:
build = auth.can_user_access_build('build_id')
except HTTPException:
logging.debug('User access to artifact failed. Trying API key.')
_, build = auth.can_api_key_access_build('build_id')
sha1sum = request.args.get('sha1sum', type=str)
if not sha1sum:
logging.debug('Artifact sha1sum=%r not supplied', sha1sum)
abort(404)
artifact = models.Artifact.query.get(sha1sum)
if not artifact:
logging.debug('Artifact sha1sum=%r does not exist', sha1sum)
abort(404)
build_id = request.args.get('build_id', type=int)
if not build_id:
logging.debug('build_id missing for artifact sha1sum=%r', sha1sum)
abort(404)
is_owned = artifact.owners.filter_by(id=build_id).first()
if not is_owned:
logging.debug('build_id=%r not owner of artifact sha1sum=%r',
build_id, sha1sum)
abort(403)
# Make sure there are no Set-Cookie headers on the response so this
# request is cachable by all HTTP frontends.
@utils.after_this_request
def no_session(response):
if 'Set-Cookie' in response.headers:
del response.headers['Set-Cookie']
if not utils.is_production():
# Insert a sleep to emulate how the page loading looks in production.
time.sleep(1.5)
if request.if_none_match and request.if_none_match.contains(sha1sum):
response = flask.Response(status=304)
return response
return _get_artifact_response(artifact) | [
"Downloads an artifact by it's content hash."
]
|
Please provide a description of the function:def register(coordinator):
utils.verify_binary('pdiff_compare_binary', ['-version'])
utils.verify_binary('pdiff_composite_binary', ['-version'])
assert FLAGS.pdiff_threads > 0
assert FLAGS.queue_server_prefix
item = queue_worker.RemoteQueueWorkflow(
constants.PDIFF_QUEUE_NAME,
DoPdiffQueueWorkflow,
max_tasks=FLAGS.pdiff_threads,
wait_seconds=FLAGS.pdiff_wait_seconds)
item.root = True
coordinator.input_queue.put(item) | [
"Registers this module as a worker with the given coordinator."
]
|
Please provide a description of the function:def evict(self):
logging.debug('Evicting cache for %r', self.cache_key)
_clear_version_cache(self.cache_key)
# Cause the cache key to be refreshed next time any operation is
# run to make sure we don't act on old cached data.
self.versioned_cache_key = None | [
"Evict all caches related to these operations."
]
|
Please provide a description of the function:def sort_run(run):
# Sort errors first, then by name. Also show errors that were manually
# approved, so the paging sort order stays the same even after users
# approve a diff on the run page.
if run.status in models.Run.DIFF_NEEDED_STATES:
return (0, run.name)
return (1, run.name) | [
"Sort function for runs within a release."
]
|
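The tuple it returns is intended as a sort key, so runs that still need review come first and ties fall back to name order; a one-line usage sketch:

# Runs whose status is in models.Run.DIFF_NEEDED_STATES sort ahead of the rest.
run_list.sort(key=sort_run)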
Please provide a description of the function:def parse(obj, required_properties=None, additional_properties=None,
ignore_optional_property_errors=None):
if not (required_properties is
additional_properties is
ignore_optional_property_errors is None):
with parsing(required_properties=required_properties,
additional_properties=additional_properties,
ignore_optional_property_errors=ignore_optional_property_errors):
return parse(obj)
validator = None
if isinstance(obj, Validator):
validator = obj
elif inspect.isclass(obj) and issubclass(obj, Validator):
validator = obj()
else:
try:
validator = _NAMED_VALIDATORS[obj]
except (KeyError, TypeError):
for factory in _VALIDATOR_FACTORIES:
validator = factory(obj)
if validator is not None:
break
else:
if inspect.isclass(validator) and issubclass(validator, Validator):
_NAMED_VALIDATORS[obj] = validator = validator()
if not isinstance(validator, Validator):
raise SchemaError("%r cannot be parsed as a Validator" % obj)
return validator | [
"Try to parse the given ``obj`` as a validator instance.\n\n :param obj: The object to be parsed. If it is a...:\n\n - :py:class:`Validator` instance, return it.\n - :py:class:`Validator` subclass, instantiate it without arguments and\n return it.\n - :py:attr:`~Validator.name` of a known :py:class:`Validator` subclass,\n instantiate the subclass without arguments and return it.\n - otherwise find the first registered :py:class:`Validator` factory that\n can create it. The search order is the reverse of the factory registration\n order. The caller is responsible for ensuring there are no ambiguous\n values that can be parsed by more than one factory.\n\n :param required_properties: Specifies for this parse call whether parsed\n :py:class:`~valideer.validators.Object` properties are required or\n optional by default. It can be:\n\n - ``True`` for required.\n - ``False`` for optional.\n - ``None`` to use the value of the\n :py:attr:`~valideer.validators.Object.REQUIRED_PROPERTIES` attribute.\n\n :param additional_properties: Specifies for this parse call the schema of\n all :py:class:`~valideer.validators.Object` properties that are not\n explicitly defined as optional or required. It can also be:\n\n - ``True`` to allow any value for additional properties.\n - ``False`` to disallow any additional properties.\n - :py:attr:`~valideer.validators.Object.REMOVE` to remove any additional\n properties from the adapted object.\n - ``None`` to use the value of the\n :py:attr:`~valideer.validators.Object.ADDITIONAL_PROPERTIES` attribute.\n\n :param ignore_optional_property_errors: Determines if invalid optional\n properties are ignored:\n\n - ``True`` to ignore invalid optional properties.\n - ``False`` to raise ValidationError for invalid optional properties.\n - ``None`` to use the value of the\n :py:attr:`~valideer.validators.Object.IGNORE_OPTIONAL_PROPERTY_ERRORS`\n attribute.\n\n :raises SchemaError: If no appropriate validator could be found.\n\n .. warning:: Passing ``required_properties`` and/or ``additional_properties``\n with value other than ``None`` may be non intuitive for schemas that\n involve nested validators. Take for example the following schema::\n\n v = V.parse({\n \"x\": \"integer\",\n \"child\": V.Nullable({\n \"y\": \"integer\"\n })\n }, required_properties=True)\n\n Here the top-level properties 'x' and 'child' are required but the nested\n 'y' property is not. This is because by the time :py:meth:`parse` is called,\n :py:class:`~valideer.validators.Nullable` has already parsed its argument\n with the default value of ``required_properties``. Several other builtin\n validators work similarly to :py:class:`~valideer.validators.Nullable`,\n accepting one or more schemas to parse. In order to parse an arbitrarily\n complex nested validator with the same value for ``required_properties``\n and/or ``additional_properties``, use the :py:func:`parsing` context\n manager instead::\n\n with V.parsing(required_properties=True):\n v = V.parse({\n \"x\": \"integer\",\n \"child\": V.Nullable({\n \"y\": \"integer\"\n })\n })\n "
]
|
Please provide a description of the function:def parsing(**kwargs):
from .validators import Object
with _VALIDATOR_FACTORIES_LOCK:
old_values = {}
for key, value in iteritems(kwargs):
if value is not None:
attr = key.upper()
old_values[key] = getattr(Object, attr)
setattr(Object, attr, value)
try:
yield
finally:
for key, value in iteritems(kwargs):
if value is not None:
setattr(Object, key.upper(), old_values[key]) | [
"\n Context manager for overriding the default validator parsing rules for the\n following code block.\n "
]
|
Please provide a description of the function:def register(name, validator):
if not isinstance(validator, Validator):
raise TypeError("Validator instance expected, %s given" % validator.__class__)
_NAMED_VALIDATORS[name] = validator | [
"Register a validator instance under the given ``name``."
]
|
Please provide a description of the function:def accepts(**schemas):
validate = parse(schemas).validate
@decorator
def validating(func, *args, **kwargs):
validate(inspect.getcallargs(func, *args, **kwargs), adapt=False)
return func(*args, **kwargs)
return validating | [
"Create a decorator for validating function parameters.\n\n Example::\n\n @accepts(a=\"number\", body={\"+field_ids\": [int], \"is_ok\": bool})\n def f(a, body):\n print (a, body[\"field_ids\"], body.get(\"is_ok\"))\n\n :param schemas: The schema for validating a given parameter.\n "
]
|
Please provide a description of the function:def returns(schema):
validate = parse(schema).validate
@decorator
def validating(func, *args, **kwargs):
ret = func(*args, **kwargs)
validate(ret, adapt=False)
return ret
return validating | [
"Create a decorator for validating function return value.\n\n Example::\n @accepts(a=int, b=int)\n @returns(int)\n def f(a, b):\n return a + b\n\n :param schema: The schema for adapting a given parameter.\n "
]
|
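The same pattern for return values, taken almost verbatim from the docstring above:

import valideer as V

@V.accepts(a=int, b=int)
@V.returns(int)
def add(a, b):
    return a + b

add(1, 2)    # 3; returning a non-int would raise a ValidationError instead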
Please provide a description of the function:def adapts(**schemas):
validate = parse(schemas).validate
@decorator
def adapting(func, *args, **kwargs):
adapted = validate(inspect.getcallargs(func, *args, **kwargs), adapt=True)
argspec = inspect.getargspec(func)
if argspec.varargs is argspec.keywords is None:
# optimization for the common no varargs, no keywords case
return func(**adapted)
adapted_varargs = adapted.pop(argspec.varargs, ())
adapted_keywords = adapted.pop(argspec.keywords, {})
if not adapted_varargs: # keywords only
if adapted_keywords:
adapted.update(adapted_keywords)
return func(**adapted)
adapted_posargs = [adapted[arg] for arg in argspec.args]
adapted_posargs.extend(adapted_varargs)
return func(*adapted_posargs, **adapted_keywords)
return adapting | [
"Create a decorator for validating and adapting function parameters.\n\n Example::\n\n @adapts(a=\"number\", body={\"+field_ids\": [V.AdaptTo(int)], \"is_ok\": bool})\n def f(a, body):\n print (a, body.field_ids, body.is_ok)\n\n :param schemas: The schema for adapting a given parameter.\n "
]
|
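A hedged sketch of adapts. The docstring example reads body.field_ids as attributes; plain dict access is used here to stay conservative about what type the adapted value actually has. AdaptTo(int) converts the incoming strings before the function body runs:

import valideer as V

@V.adapts(a="number", body={"+field_ids": [V.AdaptTo(int)], "is_ok": bool})
def f(a, body):
    return a, body["field_ids"], body.get("is_ok")

print(f(3, {"field_ids": ["1", "2"], "is_ok": True}))
# expected: (3, [1, 2], True)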
Please provide a description of the function:def _ObjectFactory(obj):
if isinstance(obj, dict):
optional, required = {}, {}
for key, value in iteritems(obj):
if key.startswith("+"):
required[key[1:]] = value
elif key.startswith("?"):
optional[key[1:]] = value
elif Object.REQUIRED_PROPERTIES:
required[key] = value
else:
optional[key] = value
return Object(optional, required) | [
"Parse a python ``{name: schema}`` dict as an :py:class:`Object` instance.\n\n - A property name prepended by \"+\" is required\n - A property name prepended by \"?\" is optional\n - Any other property is required if :py:attr:`Object.REQUIRED_PROPERTIES`\n is True else it's optional\n "
]
|
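The prefix convention handled by _ObjectFactory, shown on a tiny schema; this assumes the default Object.REQUIRED_PROPERTIES value of False, so an unprefixed key would be optional:

import valideer as V

v = V.parse({
    "+name": "string",       # '+' marks the property as required
    "?nickname": "string",   # '?' marks it as explicitly optional
})

v.validate({"name": "Ada"})          # passes; nickname may be omitted
# v.validate({"nickname": "A"})      # would fail: required 'name' is missing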
Please provide a description of the function:def get_checksum_metadata_tag(self):
if not self._checksums:
print("Warning: No checksums have been computed for this file.")
return {str(_hash_name): str(_hash_value) for _hash_name, _hash_value in self._checksums.items()} | [
" Returns a map of checksum values by the name of the hashing function that produced it."
]
|
Please provide a description of the function:def compute_checksum(self):
if self._filename.startswith("s3://"):
print("Warning: Did not perform client-side checksumming for file in S3. To be implemented.")
pass
else:
checksumCalculator = self.ChecksumCalculator(self._filename)
self._checksums = checksumCalculator.compute() | [
" Calculates checksums for a given file. "
]
|
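The ChecksumCalculator class referenced above is not shown here, so the following is only an illustrative stand-in built on hashlib, not the hca implementation. The hash names mirror those that appear later in upload_to_cloud (sha1, sha256); crc32c and s3_etag are omitted because they need extra libraries:

import hashlib

def compute_checksums(path, chunk_size=1024 * 1024):
    # Stream the file once and feed each registered hasher.
    hashers = {"sha1": hashlib.sha1(), "sha256": hashlib.sha256()}
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            for h in hashers.values():
                h.update(chunk)
    return {name: h.hexdigest() for name, h in hashers.items()}

# get_checksum_metadata_tag above then reduces to a plain
# {hash_name: hex_digest} mapping of strings.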
Please provide a description of the function:def get_credentials(self):
creds_mgr = CredentialsManager(self)
creds = creds_mgr.get_credentials_from_upload_api()
return {
'aws_access_key_id': creds['access_key'],
'aws_secret_access_key': creds['secret_key'],
'aws_session_token': creds['token'],
'expiry_time': creds['expiry_time']
} | [
"\n Return a set of credentials that may be used to access the Upload Area folder in the S3 bucket\n :return: a dict containing AWS credentials in a format suitable for passing to Boto3\n or if capitalized, used as environment variables\n "
]
|
Please provide a description of the function:def list(self, detail=False):
creds_provider = CredentialsManager(upload_area=self)
s3agent = S3Agent(credentials_provider=creds_provider)
key_prefix = self.uuid + "/"
key_prefix_length = len(key_prefix)
for page in s3agent.list_bucket_by_page(bucket_name=self.uri.bucket_name, key_prefix=key_prefix):
file_list = [key[key_prefix_length:] for key in page] # cut off upload-area-id/
if detail:
files_info = self.upload_service.api_client.files_info(self.uuid, file_list)
else:
files_info = [{'name': filename} for filename in file_list]
for file_info in files_info:
yield file_info | [
"\n A generator that yields information about each file in the upload area\n :param detail: return detailed file information (slower)\n :return: a list of dicts containing at least 'name', or more of detail was requested\n "
]
|
Please provide a description of the function:def store_file(self, filename, file_content, content_type):
return self.upload_service.api_client.store_file(area_uuid=self.uuid,
filename=filename,
file_content=file_content,
content_type=content_type) | [
"\n Store a small file in an Upload Area\n\n :param str area_uuid: A RFC4122-compliant ID for the upload area\n :param str filename: The name the file will have in the Upload Area\n :param str file_content: The contents of the file\n :param str content_type: The MIME-type for the file\n :return: information about the stored file (similar to that returned by files_info)\n :rtype: dict\n :raises UploadApiException: if file could not be stored\n "
]
|
Please provide a description of the function:def upload_files(self, file_paths, file_size_sum=0, dcp_type="data", target_filename=None,
use_transfer_acceleration=True, report_progress=False, sync=True):
self._setup_s3_agent_for_file_upload(file_count=len(file_paths),
file_size_sum=file_size_sum,
use_transfer_acceleration=use_transfer_acceleration)
pool = ThreadPool()
if report_progress:
print("\nStarting upload of %s files to upload area %s" % (len(file_paths), self.uuid))
for file_path in file_paths:
pool.add_task(self._upload_file, file_path,
target_filename=target_filename,
use_transfer_acceleration=use_transfer_acceleration,
report_progress=report_progress,
sync=sync)
pool.wait_for_completion()
if report_progress:
number_of_errors = len(self.s3agent.failed_uploads)
if number_of_errors == 0:
print(
"Completed upload of %d files to upload area %s\n" %
(self.s3agent.file_upload_completed_count, self.uuid))
else:
error = "\nThe following files failed:"
for k, v in self.s3agent.failed_uploads.items():
error += "\n%s: [Exception] %s" % (k, v)
error += "\nPlease retry or contact an hca administrator at [email protected] for help.\n"
raise UploadException(error) | [
"\n A function that takes in a list of file paths and other optional args for parallel file upload\n "
]
|
Please provide a description of the function:def validate_files(self, file_list, validator_image, original_validation_id="", environment={}):
return self.upload_service.api_client.validate_files(area_uuid=self.uuid,
file_list=file_list,
validator_image=validator_image,
original_validation_id=original_validation_id,
environment=environment) | [
"\n Invoke supplied validator Docker image and give it access to the file/s.\n The validator must be based off the base validator Docker image.\n\n :param list file_list: A list of files within the Upload Area to be validated\n :param str validator_image: the location of a docker image to use for validation\n :param str original_validation_id: [optional]\n :param dict environment: [optional] list of environment variable to set for the validator\n :return: ID of scheduled validation\n :rtype: dict\n :raises UploadApiException: if information could not be obtained\n "
]
|
Please provide a description of the function:def checksum_status(self, filename):
return self.upload_service.api_client.checksum_status(area_uuid=self.uuid, filename=filename) | [
"\n Retrieve checksum status and values for a file\n\n :param str filename: The name of the file within the Upload Area\n :return: a dict with checksum information\n :rtype: dict\n :raises UploadApiException: if information could not be obtained\n "
]
|
Please provide a description of the function:def validation_status(self, filename):
return self.upload_service.api_client.validation_status(area_uuid=self.uuid, filename=filename) | [
"\n Get status and results of latest validation job for a file.\n\n :param str filename: The name of the file within the Upload Area\n :return: a dict with validation information\n :rtype: dict\n :raises UploadApiException: if information could not be obtained\n "
]
|
Please provide a description of the function:def check_if_release_is_current(log):
if __version__ == '0.0.0':
return
client = xmlrpclib.ServerProxy('https://pypi.python.org/pypi')
latest_pypi_version = client.package_releases('hca')
latest_version_nums = [int(i) for i in latest_pypi_version[0].split('.')]
this_version_nums = [int(i) for i in __version__.split('.')]
for i in range(max([len(latest_version_nums), len(this_version_nums)])):
try:
if this_version_nums[i] < latest_version_nums[i]:
log.warning('WARNING: Python (pip) package "hca" is not up-to-date!\n'
'You have hca version: ' + str(__version__) + '\n'
'Please use the latest hca version: ' + str(latest_pypi_version[0]))
# handles the odd case where a user's current __version__ is higher than PyPi's
elif this_version_nums[i] > latest_version_nums[i]:
break
# if 4.2 compared to 4.3.1, this handles the missing element
except IndexError:
pass | [
"Warns the user if their release is behind the latest PyPi __version__."
]
|
Please provide a description of the function:def _parse_docstring(docstring):
this will be the summary
:param name: describe the parameter called name.
this will be the descriptions
* more description
* more description
This will also be in the description
\
settings = OptionParser(components=(RSTParser,)).get_default_values()
rstparser = RSTParser()
document = utils.new_document(' ', settings)
rstparser.parse(docstring, document)
if document.children[0].tagname != 'block_quote':
logger.warning("The first line of the docstring must be blank.")
else:
document = document.children[0]
def get_params(field_list_node, params):
for field in field_list_node.children:
name = field.children[0].rawsource.split(' ')
if 'param' == name[0]:
params[name[-1]] = field.children[1].astext()
method_args = {'summary': '', 'params': dict(), 'description': ''}
for node in document.children:
        if node.tagname == 'paragraph' and method_args['summary'] == '':
            method_args['summary'] = node.astext()
        elif node.tagname == 'field_list':
get_params(node, method_args['params'])
else:
method_args['description'] += '\n' + node.astext()
return method_args | [
"\n Using the sphinx RSTParse to parse __doc__ for argparse `parameters`, `help`, and `description`. The first\n rst paragraph encountered it treated as the argparse help text. Any param fields are treated as argparse\n arguments. Any other text is combined and added to the argparse description.\n\n example:\n \\",
"\n\n :param str docstring:\n :return:\n :rtype: dict\n "
]
|
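A hedged usage sketch of _parse_docstring, assuming the function is importable as shown; the docstring must start with a blank line and be indented so docutils parses it as a block quote. The commented results indicate the expected shape of the output, though exact whitespace may differ:

doc = """
    Upload a file to the service.

    :param str path: local path of the file to upload.
    :param bool overwrite: replace an existing remote file.

    Extra notes end up in the description field.
"""
result = _parse_docstring(doc)
# result["summary"]      -> "Upload a file to the service."
# result["params"]       -> {"path": "local path of ...", "overwrite": "replace ..."}
# result["description"]  -> remaining paragraphs joined with newlines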
Please provide a description of the function:def sizeof_fmt(num, suffix='B'):
precision = {'': 0, 'Ki': 0, 'Mi': 0, 'Gi': 3, 'Ti': 6, 'Pi': 9, 'Ei': 12, 'Zi': 15}
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
format_string = "{number:.%df} {unit}{suffix}" % precision[unit]
return format_string.format(number=num, unit=unit, suffix=suffix)
num /= 1024.0
return "%.18f %s%s" % (num, 'Yi', suffix) | [
"\n Adapted from https://stackoverflow.com/a/1094933\n Re: precision - display enough decimals to show progress on a slow (<5 MB/s) Internet connection\n "
]
|
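A few sample calls showing the tiered precision (zero decimals up to MiB, more from GiB upward so slow transfers still show visible progress):

print(sizeof_fmt(512))             # 512 B
print(sizeof_fmt(1536))            # 2 KiB
print(sizeof_fmt(5 * 1024 ** 3))   # 5.000 GiB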
Please provide a description of the function:def _item_exists_in_bucket(self, bucket, key, checksums):
try:
obj = self.target_s3.meta.client.head_object(Bucket=bucket, Key=key)
            if obj and 'Metadata' in obj:  # dict membership test; dicts have no containsKey() method
if obj['Metadata'] == checksums:
return True
except ClientError:
# An exception from calling `head_object` indicates that no file with the specified name could be found
# in the specified bucket.
return False | [
" Returns true if the key already exists in the current bucket and the clientside checksum matches the\n file's checksums, and false otherwise."
]
|
Please provide a description of the function:def upload_to_cloud(file_handles, staging_bucket, replica, from_cloud=False):
s3 = boto3.resource("s3")
file_uuids = []
key_names = []
abs_file_paths = []
if from_cloud:
file_uuids, key_names = _copy_from_s3(file_handles[0], s3)
else:
destination_bucket = s3.Bucket(staging_bucket)
for raw_fh in file_handles:
file_size = os.path.getsize(raw_fh.name)
multipart_chunksize = s3_multipart.get_s3_multipart_chunk_size(file_size)
tx_cfg = TransferConfig(multipart_threshold=s3_multipart.MULTIPART_THRESHOLD,
multipart_chunksize=multipart_chunksize)
with ChecksummingBufferedReader(raw_fh, multipart_chunksize) as fh:
file_uuid = str(uuid.uuid4())
key_name = "{}/{}".format(file_uuid, os.path.basename(fh.raw.name))
destination_bucket.upload_fileobj(
fh,
key_name,
Config=tx_cfg,
ExtraArgs={
'ContentType': _mime_type(fh.raw.name),
}
)
sums = fh.get_checksums()
metadata = {
"hca-dss-s3_etag": sums["s3_etag"],
"hca-dss-sha1": sums["sha1"],
"hca-dss-sha256": sums["sha256"],
"hca-dss-crc32c": sums["crc32c"],
}
s3.meta.client.put_object_tagging(Bucket=destination_bucket.name,
Key=key_name,
Tagging=dict(TagSet=encode_tags(metadata)))
file_uuids.append(file_uuid)
key_names.append(key_name)
abs_file_paths.append(fh.raw.name)
return file_uuids, key_names, abs_file_paths | [
"\n Upload files to cloud.\n\n :param file_handles: If from_cloud, file_handles is a aws s3 directory path to files with appropriate\n metadata uploaded. Else, a list of binary file_handles to upload.\n :param staging_bucket: The aws bucket to upload the files to.\n :param replica: The cloud replica to write to. One of 'aws', 'gc', or 'azure'. No functionality now.\n :return: a list of file uuids, key-names, and absolute file paths (local) for uploaded files\n "
]
|
Please provide a description of the function:def download(self, bundle_uuid, replica, version="", download_dir="",
metadata_files=('*',), data_files=('*',),
num_retries=10, min_delay_seconds=0.25):
errors = 0
with concurrent.futures.ThreadPoolExecutor(self.threads) as executor:
futures_to_dss_file = {executor.submit(task): dss_file
for dss_file, task in self._download_tasks(bundle_uuid,
replica,
version,
download_dir,
metadata_files,
data_files,
num_retries,
min_delay_seconds)}
for future in concurrent.futures.as_completed(futures_to_dss_file):
dss_file = futures_to_dss_file[future]
try:
future.result()
except Exception as e:
errors += 1
logger.warning('Failed to download file %s version %s from replica %s',
dss_file.uuid, dss_file.version, dss_file.replica, exc_info=e)
if errors:
raise RuntimeError('{} file(s) failed to download'.format(errors)) | [
"\n Download a bundle and save it to the local filesystem as a directory.\n\n :param str bundle_uuid: The uuid of the bundle to download\n :param str replica: the replica to download from. The supported replicas are: `aws` for Amazon Web Services, and\n `gcp` for Google Cloud Platform. [aws, gcp]\n :param str version: The version to download, else if not specified, download the latest. The version is a\n timestamp of bundle creation in RFC3339\n :param str dest_name: The destination file path for the download\n :param iterable metadata_files: one or more shell patterns against which all metadata files in the bundle will be\n matched case-sensitively. A file is considered a metadata file if the `indexed` property in the manifest is\n set. If and only if a metadata file matches any of the patterns in `metadata_files` will it be downloaded.\n :param iterable data_files: one or more shell patterns against which all data files in the bundle will be matched\n case-sensitively. A file is considered a data file if the `indexed` property in the manifest is not set. The\n file will be downloaded only if a data file matches any of the patterns in `data_files` will it be\n downloaded.\n :param int num_retries: The initial quota of download failures to accept before exiting due to\n failures. The number of retries increase and decrease as file chucks succeed and fail.\n :param float min_delay_seconds: The minimum number of seconds to wait in between retries.\n\n Download a bundle and save it to the local filesystem as a directory.\n By default, all data and metadata files are downloaded. To disable the downloading of data files,\n use `--data-files ''` if using the CLI (or `data_files=()` if invoking `download` programmatically).\n Likewise for metadata files.\n\n If a retryable exception occurs, we wait a bit and retry again. The delay increases each time we fail and\n decreases each time we successfully read a block. We set a quota for the number of failures that goes up with\n every successful block read and down with each failure.\n "
]
|
Please provide a description of the function:def _download_to_filestore(self, download_dir, dss_file, num_retries=10, min_delay_seconds=0.25):
dest_path = self._file_path(dss_file.sha256, download_dir)
if os.path.exists(dest_path):
logger.info("Skipping download of '%s' because it already exists at '%s'.", dss_file.name, dest_path)
else:
logger.debug("Downloading '%s' to '%s'.", dss_file.name, dest_path)
self._download_file(dss_file, dest_path, num_retries=num_retries, min_delay_seconds=min_delay_seconds)
logger.info("Download '%s' to '%s'.", dss_file.name, dest_path)
return dest_path | [
"\n Attempt to download the data and save it in the 'filestore' location dictated by self._file_path()\n "
]
|
Please provide a description of the function:def _download_file(self, dss_file, dest_path, num_retries=10, min_delay_seconds=0.25):
directory, _ = os.path.split(dest_path)
if directory:
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
with atomic_write(dest_path, mode="wb", overwrite=True) as fh:
if dss_file.size == 0:
return
download_hash = self._do_download_file(dss_file, fh, num_retries, min_delay_seconds)
if download_hash.lower() != dss_file.sha256.lower():
# No need to delete what's been written. atomic_write ensures we're cleaned up
logger.error("%s", "File {}: GET FAILED. Checksum mismatch.".format(dss_file.uuid))
raise ValueError("Expected sha256 {} Received sha256 {}".format(
dss_file.sha256.lower(), download_hash.lower())) | [
"\n Attempt to download the data. If a retryable exception occurs, we wait a bit and retry again. The delay\n increases each time we fail and decreases each time we successfully read a block. We set a quota for the\n number of failures that goes up with every successful block read and down with each failure.\n\n If we can, we will attempt HTTP resume. However, we verify that the server supports HTTP resume. If the\n ranged get doesn't yield the correct header, then we start over.\n "
]
|
Please provide a description of the function:def _do_download_file(self, dss_file, fh, num_retries, min_delay_seconds):
hasher = hashlib.sha256()
delay = min_delay_seconds
retries_left = num_retries
while True:
try:
response = self.get_file._request(
dict(uuid=dss_file.uuid, version=dss_file.version, replica=dss_file.replica),
stream=True,
headers={
'Range': "bytes={}-".format(fh.tell())
},
)
try:
if not response.ok:
logger.error("%s", "File {}: GET FAILED.".format(dss_file.uuid))
logger.error("%s", "Response: {}".format(response.text))
break
consume_bytes = int(fh.tell())
server_start = 0
content_range_header = response.headers.get('Content-Range', None)
if content_range_header is not None:
cre = re.compile("bytes (\d+)-(\d+)")
mo = cre.search(content_range_header)
if mo is not None:
server_start = int(mo.group(1))
consume_bytes -= server_start
assert consume_bytes >= 0
if server_start > 0 and consume_bytes == 0:
logger.info("%s", "File {}: Resuming at {}.".format(
dss_file.uuid, server_start))
elif consume_bytes > 0:
logger.info("%s", "File {}: Resuming at {}. Dropping {} bytes to match".format(
dss_file.uuid, server_start, consume_bytes))
while consume_bytes > 0:
bytes_to_read = min(consume_bytes, 1024*1024)
content = response.iter_content(chunk_size=bytes_to_read)
chunk = next(content)
if chunk:
consume_bytes -= len(chunk)
for chunk in response.iter_content(chunk_size=1024*1024):
if chunk:
fh.write(chunk)
hasher.update(chunk)
retries_left = min(retries_left + 1, num_retries)
delay = max(delay / 2, min_delay_seconds)
break
finally:
response.close()
except (ChunkedEncodingError, ConnectionError, ReadTimeout):
if retries_left > 0:
logger.info("%s", "File {}: GET FAILED. Attempting to resume.".format(dss_file.uuid))
time.sleep(delay)
delay *= 2
retries_left -= 1
continue
raise
return hasher.hexdigest() | [
"\n Abstracts away complications for downloading a file, handles retries and delays, and computes its hash\n "
]
|
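The HTTP-resume bookkeeping above hinges on the Content-Range header. A self-contained sketch of just that step, using the same regex; the helper name is local to this sketch:

import re

def bytes_to_discard(local_offset, content_range_header):
    # How many leading response bytes must be dropped so the stream
    # lines up with what is already written to disk.
    server_start = 0
    if content_range_header:
        mo = re.search(r"bytes (\d+)-(\d+)", content_range_header)
        if mo:
            server_start = int(mo.group(1))
    return local_offset - server_start

print(bytes_to_discard(4096, "bytes 4096-8191/8192"))   # 0: server honoured Range
print(bytes_to_discard(4096, None))                     # 4096: server restarted at 0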
Please provide a description of the function:def _file_path(cls, checksum, download_dir):
checksum = checksum.lower()
file_prefix = '_'.join(['files'] + list(map(str, cls.DIRECTORY_NAME_LENGTHS)))
path_pieces = [download_dir, '.hca', 'v2', file_prefix]
checksum_index = 0
assert(sum(cls.DIRECTORY_NAME_LENGTHS) <= len(checksum))
for prefix_length in cls.DIRECTORY_NAME_LENGTHS:
path_pieces.append(checksum[checksum_index:(checksum_index + prefix_length)])
checksum_index += prefix_length
path_pieces.append(checksum)
return os.path.join(*path_pieces) | [
"\n returns a file's relative local path based on the nesting parameters and the files hash\n :param checksum: a string checksum\n :param download_dir: root directory for filestore\n :return: relative Path object\n "
]
|
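A worked example of the resulting layout. DIRECTORY_NAME_LENGTHS is not shown in the snippet, so the value (2, 4) below is an assumption chosen only to illustrate the nesting:

import os

DIRECTORY_NAME_LENGTHS = (2, 4)   # assumed; the real class constant may differ

def file_path(checksum, download_dir):
    checksum = checksum.lower()
    prefix = "_".join(["files"] + [str(n) for n in DIRECTORY_NAME_LENGTHS])
    pieces = [download_dir, ".hca", "v2", prefix]
    index = 0
    for length in DIRECTORY_NAME_LENGTHS:
        pieces.append(checksum[index:index + length])
        index += length
    pieces.append(checksum)
    return os.path.join(*pieces)

print(file_path("AB34EF" + "0" * 58, "."))
# ./.hca/v2/files_2_4/ab/34ef/ab34ef0000...0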
Please provide a description of the function:def _write_output_manifest(self, manifest, filestore_root):
output = os.path.basename(manifest)
fieldnames, source_manifest = self._parse_manifest(manifest)
if 'file_path' not in fieldnames:
fieldnames.append('file_path')
with atomic_write(output, overwrite=True) as f:
delimiter = b'\t' if USING_PYTHON2 else '\t'
writer = csv.DictWriter(f, fieldnames, delimiter=delimiter, quoting=csv.QUOTE_NONE)
writer.writeheader()
for row in source_manifest:
row['file_path'] = self._file_path(row['file_sha256'], filestore_root)
writer.writerow(row)
if os.path.isfile(output):
logger.warning('Overwriting manifest %s', output)
logger.info('Rewrote manifest %s with additional column containing path to downloaded files.', output) | [
"\n Adds the file path column to the manifest and writes the copy to the current directory. If the original manifest\n is in the current directory it is overwritten with a warning.\n "
]
|
Please provide a description of the function:def download_manifest_v2(self, manifest, replica,
num_retries=10,
min_delay_seconds=0.25,
download_dir='.'):
fieldnames, rows = self._parse_manifest(manifest)
errors = 0
with concurrent.futures.ThreadPoolExecutor(max_workers=self.threads) as executor:
futures_to_dss_file = {}
for row in rows:
dss_file = DSSFile.from_manifest_row(row, replica)
future = executor.submit(self._download_to_filestore, download_dir, dss_file,
num_retries=num_retries, min_delay_seconds=min_delay_seconds)
futures_to_dss_file[future] = dss_file
for future in concurrent.futures.as_completed(futures_to_dss_file):
dss_file = futures_to_dss_file[future]
try:
future.result()
except Exception as e:
errors += 1
logger.warning('Failed to download file %s version %s from replica %s',
dss_file.uuid, dss_file.version, dss_file.replica, exc_info=e)
if errors:
raise RuntimeError('{} file(s) failed to download'.format(errors))
else:
self._write_output_manifest(manifest, download_dir) | [
"\n Process the given manifest file in TSV (tab-separated values) format and download the files referenced by it.\n The files are downloaded in the version 2 format.\n\n This download format will serve as the main storage format for downloaded files. If a user specifies a different\n format for download (coming in the future) the files will first be downloaded in this format, then hard-linked\n to the user's preferred format.\n\n :param str manifest: path to a TSV (tab-separated values) file listing files to download\n :param str replica: the replica to download from. The supported replicas are: `aws` for Amazon Web Services, and\n `gcp` for Google Cloud Platform. [aws, gcp]\n :param int num_retries: The initial quota of download failures to accept before exiting due to\n failures. The number of retries increase and decrease as file chucks succeed and fail.\n :param float min_delay_seconds: The minimum number of seconds to wait in between retries.\n\n Process the given manifest file in TSV (tab-separated values) format and download the files\n referenced by it.\n\n Each row in the manifest represents one file in DSS. The manifest must have a header row. The header row\n must declare the following columns:\n\n * `file_uuid` - the UUID of the file in DSS.\n\n * `file_version` - the version of the file in DSS.\n\n The TSV may have additional columns. Those columns will be ignored. The ordering of the columns is\n insignificant because the TSV is required to have a header row.\n "
]
|
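A hedged sketch of preparing and using such a manifest. The file_uuid and file_version columns come from the docstring; file_sha256 is assumed to be needed as well, since _file_path and _write_output_manifest both read it. The UUID, version, and digest values below are made up for illustration:

import csv

rows = [{
    "file_uuid": "7c1552d6-0000-4000-8000-000000000000",
    "file_version": "2019-01-30T165057.189000Z",
    "file_sha256": "0" * 64,
}]
with open("manifest.tsv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=list(rows[0]), delimiter="\t")
    writer.writeheader()
    writer.writerows(rows)

# from hca.dss import DSSClient     # assumed entry point for the methods above
# DSSClient().download_manifest_v2("manifest.tsv", replica="aws")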
Please provide a description of the function:def download_manifest(self, manifest, replica, num_retries=10, min_delay_seconds=0.25, download_dir=''):
file_errors = 0
file_task, bundle_errors = self._download_manifest_tasks(manifest,
replica,
num_retries,
min_delay_seconds,
download_dir)
with concurrent.futures.ThreadPoolExecutor(self.threads) as executor:
futures_to_dss_file = {executor.submit(task): dss_file
for dss_file, task in file_task}
for future in concurrent.futures.as_completed(futures_to_dss_file):
dss_file = futures_to_dss_file[future]
try:
future.result()
except Exception as e:
file_errors += 1
logger.warning('Failed to download file %s version %s from replica %s',
dss_file.uuid, dss_file.version, dss_file.replica, exc_info=e)
if file_errors or bundle_errors:
bundle_error_str = '{} bundle(s) failed to download'.format(bundle_errors) if bundle_errors else ''
file_error_str = '{} file(s) failed to download'.format(file_errors) if file_errors else ''
raise RuntimeError(bundle_error_str + (' and ' if bundle_errors and file_errors else '') + file_error_str)
else:
self._write_output_manifest(manifest, download_dir)
logger.info('Primary copies of the files have been downloaded to `.hca` and linked '
'into per-bundle subdirectories of the current directory.') | [
"\n Process the given manifest file in TSV (tab-separated values) format and download the files referenced by it.\n\n :param str manifest: path to a TSV (tab-separated values) file listing files to download\n :param str replica: the replica to download from. The supported replicas are: `aws` for Amazon Web Services, and\n `gcp` for Google Cloud Platform. [aws, gcp]\n :param int num_retries: The initial quota of download failures to accept before exiting due to\n failures. The number of retries increase and decrease as file chucks succeed and fail.\n :param float min_delay_seconds: The minimum number of seconds to wait in between retries.\n\n Process the given manifest file in TSV (tab-separated values) format and download the files\n referenced by it.\n\n Each row in the manifest represents one file in DSS. The manifest must have a header row. The header row\n must declare the following columns:\n\n * `bundle_uuid` - the UUID of the bundle containing the file in DSS.\n\n * `bundle_version` - the version of the bundle containing the file in DSS.\n\n * `file_name` - the name of the file as specified in the bundle.\n\n The TSV may have additional columns. Those columns will be ignored. The ordering of the columns is\n insignificant because the TSV is required to have a header row.\n "
]
|
Please provide a description of the function:def upload(self, src_dir, replica, staging_bucket, timeout_seconds=1200):
bundle_uuid = str(uuid.uuid4())
version = datetime.utcnow().strftime("%Y-%m-%dT%H%M%S.%fZ")
files_to_upload, files_uploaded = [], []
for filename in iter_paths(src_dir):
full_file_name = filename.path
files_to_upload.append(open(full_file_name, "rb"))
logger.info("Uploading %i files from %s to %s", len(files_to_upload), src_dir, staging_bucket)
file_uuids, uploaded_keys, abs_file_paths = upload_to_cloud(files_to_upload, staging_bucket=staging_bucket,
replica=replica, from_cloud=False)
for file_handle in files_to_upload:
file_handle.close()
filenames = [object_name_builder(p, src_dir) for p in abs_file_paths]
filename_key_list = list(zip(filenames, file_uuids, uploaded_keys))
for filename, file_uuid, key in filename_key_list:
filename = filename.replace('\\', '/') # for windows paths
if filename.startswith('/'):
filename = filename.lstrip('/')
logger.info("File %s: registering...", filename)
# Generating file data
creator_uid = self.config.get("creator_uid", 0)
source_url = "s3://{}/{}".format(staging_bucket, key)
logger.info("File %s: registering from %s -> uuid %s", filename, source_url, file_uuid)
response = self.put_file._request(dict(
uuid=file_uuid,
bundle_uuid=bundle_uuid,
version=version,
creator_uid=creator_uid,
source_url=source_url
))
files_uploaded.append(dict(name=filename, version=version, uuid=file_uuid, creator_uid=creator_uid))
if response.status_code in (requests.codes.ok, requests.codes.created):
logger.info("File %s: Sync copy -> %s", filename, version)
else:
assert response.status_code == requests.codes.accepted
logger.info("File %s: Async copy -> %s", filename, version)
timeout = time.time() + timeout_seconds
wait = 1.0
while time.time() < timeout:
try:
self.head_file(uuid=file_uuid, replica="aws", version=version)
break
except SwaggerAPIException as e:
if e.code != requests.codes.not_found:
msg = "File {}: Unexpected server response during registration"
req_id = 'X-AWS-REQUEST-ID: {}'.format(response.headers.get("X-AWS-REQUEST-ID"))
raise RuntimeError(msg.format(filename), req_id)
time.sleep(wait)
wait = min(60.0, wait * self.UPLOAD_BACKOFF_FACTOR)
else:
# timed out. :(
req_id = 'X-AWS-REQUEST-ID: {}'.format(response.headers.get("X-AWS-REQUEST-ID"))
raise RuntimeError("File {}: registration FAILED".format(filename), req_id)
logger.debug("Successfully uploaded file")
file_args = [{'indexed': file_["name"].endswith(".json"),
'name': file_['name'],
'version': file_['version'],
'uuid': file_['uuid']} for file_ in files_uploaded]
logger.info("%s", "Bundle {}: Registering...".format(bundle_uuid))
response = self.put_bundle(uuid=bundle_uuid,
version=version,
replica=replica,
creator_uid=creator_uid,
files=file_args)
logger.info("%s", "Bundle {}: Registered successfully".format(bundle_uuid))
return {
"bundle_uuid": bundle_uuid,
"creator_uid": creator_uid,
"replica": replica,
"version": response["version"],
"files": files_uploaded
} | [
"\n Upload a directory of files from the local filesystem and create a bundle containing the uploaded files.\n\n :param str src_dir: file path to a directory of files to upload to the replica.\n :param str replica: the replica to upload to. The supported replicas are: `aws` for Amazon Web Services, and\n `gcp` for Google Cloud Platform. [aws, gcp]\n :param str staging_bucket: a client controlled AWS S3 storage bucket to upload from.\n :param int timeout_seconds: the time to wait for a file to upload to replica.\n\n Upload a directory of files from the local filesystem and create a bundle containing the uploaded files.\n This method requires the use of a client-controlled object storage bucket to stage the data for upload.\n "
]
|
Please provide a description of the function:def iter_paths(src_dir):
for x in scandir(os.path.join(src_dir)):
if x.is_dir(follow_symlinks=False):
for x in iter_paths(x.path):
yield x
else:
yield x | [
"\n Function that recursively locates files within folder\n Note: scandir does not guarantee ordering\n :param src_dir: string for directory to be parsed through\n :return an iterable of DirEntry objects all files within the src_dir\n "
]
|
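Because scandir gives no ordering guarantee, callers that need stable output should sort the results themselves. A self-contained Python 3 version using os.scandir, shown only for illustration:

import os

def iter_paths(src_dir):
    for entry in os.scandir(src_dir):
        if entry.is_dir(follow_symlinks=False):
            for sub_entry in iter_paths(entry.path):
                yield sub_entry
        else:
            yield entry

for path in sorted(entry.path for entry in iter_paths(".")):
    print(path)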
Please provide a description of the function:def hardlink(source, link_name):
if sys.version_info < (3,) and platform.system() == 'Windows': # pragma: no cover
import ctypes
create_hard_link = ctypes.windll.kernel32.CreateHardLinkW
create_hard_link.argtypes = [ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_void_p]
create_hard_link.restype = ctypes.wintypes.BOOL
res = create_hard_link(link_name, source, None)
if res == 0:
raise ctypes.WinError()
else:
try:
os.link(source, link_name)
except OSError as e:
if e.errno != errno.EEXIST:
raise
else:
# It's possible that the user created a different file with the same name as the
# one we're trying to download. Thus we need to check the if the inode is different
# and raise an error in this case.
source_stat = os.stat(source)
dest_stat = os.stat(link_name)
# Check device first because different drives can have the same inode number
if source_stat.st_dev != dest_stat.st_dev or source_stat.st_ino != dest_stat.st_ino:
raise | [
"\n Create a hardlink in a portable way\n\n The code for Windows support is adapted from:\n https://github.com/sunshowers/ntfs/blob/master/ntfsutils/hardlink.py\n "
]
|
Please provide a description of the function:def request_with_retries_on_post_search(self, session, url, query, json_input, stream, headers):
# TODO: Revert this PR as soon as the appropriate swagger definitions have percolated up
# to prod and merged; see https://github.com/HumanCellAtlas/data-store/pull/1961
status_code = 500
if '/v1/search' in url:
retry_count = 10
else:
retry_count = 1
while status_code in (500, 502, 503, 504) and retry_count > 0:
try:
retry_count -= 1
res = session.request(self.http_method,
url,
params=query,
json=json_input,
stream=stream,
headers=headers,
timeout=self.client.timeout_policy)
status_code = res.status_code
except SwaggerAPIException:
if retry_count > 0:
pass
else:
raise
return res | [
"\n Submit a request and retry POST search requests specifically.\n\n We don't currently retry on POST requests, and this is intended as a temporary fix until\n the swagger is updated and changes applied to prod. In the meantime, this function will add\n retries specifically for POST search (and any other POST requests will not be retried).\n "
]
|
Please provide a description of the function:def load_swagger_json(swagger_json, ptr_str="$ref"):
refs = []
def store_refs(d):
if len(d) == 1 and ptr_str in d:
refs.append(d)
return d
swagger_content = json.load(swagger_json, object_hook=store_refs)
for ref in refs:
_, target = ref.popitem()
assert target[0] == "#"
ref.update(resolve_pointer(swagger_content, target[1:]))
return swagger_content | [
"\n Load the Swagger JSON and resolve {\"$ref\": \"#/...\"} internal JSON Pointer references.\n "
]
|
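A small round trip showing the in-place $ref substitution, assuming resolve_pointer comes from the jsonpointer package and that load_swagger_json is importable as shown:

import io
import json

doc = {
    "definitions": {"Pet": {"type": "object"}},
    "paths": {"/pets": {"schema": {"$ref": "#/definitions/Pet"}}},
}
swagger = load_swagger_json(io.StringIO(json.dumps(doc)))
print(swagger["paths"]["/pets"]["schema"])   # {'type': 'object'}: the $ref was replaced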
Please provide a description of the function:def refresh_swagger(self):
try:
os.remove(self._get_swagger_filename(self.swagger_url))
except EnvironmentError as e:
logger.warn(os.strerror(e.errno))
else:
self.__init__() | [
"\n Manually refresh the swagger document. This can help resolve errors communicate with the API.\n "
]
|
Please provide a description of the function:def login(self, access_token=""):
if access_token:
credentials = argparse.Namespace(token=access_token, refresh_token=None, id_token=None)
else:
scopes = ["openid", "email", "offline_access"]
from google_auth_oauthlib.flow import InstalledAppFlow
flow = InstalledAppFlow.from_client_config(self.application_secrets, scopes=scopes)
msg = "Authentication successful. Please close this tab and run HCA CLI commands in the terminal."
credentials = flow.run_local_server(success_message=msg, audience=self._audience)
# TODO: (akislyuk) test token autorefresh on expiration
self.config.oauth2_token = dict(access_token=credentials.token,
refresh_token=credentials.refresh_token,
id_token=credentials.id_token,
expires_at="-1",
token_type="Bearer")
print("Storing access credentials") | [
"\n Configure and save {prog} authentication credentials.\n\n This command may open a browser window to ask for your\n consent to use web service authentication credentials.\n "
]
|