| body (string, 26-98.2k chars) | body_hash (int64) | docstring (string, 1-16.8k chars) | path (string, 5-230 chars) | name (string, 1-96 chars) | repository_name (string, 7-89 chars) | lang (1 class: python) | body_without_docstring (string, 20-98.2k chars) |
---|---|---|---|---|---|---|---|
def apply_bbox_cutout(image, bboxes, pad_fraction):
'Applies cutout to a single bounding box within image.'
random_index = tf.random_uniform(shape=[], maxval=tf.shape(bboxes)[0], dtype=tf.int32)
chosen_bbox = tf.gather(bboxes, random_index)
(mask, mean) = _cutout_inside_bbox(image, chosen_bbox, pad_fraction)
replace = (mean if replace_with_mean else 128)
image = tf.where(tf.equal(mask, 0), tf.cast((tf.ones_like(image, dtype=image.dtype) * replace), dtype=image.dtype), image)
return image | -3,658,342,878,226,009,000 | Applies cutout to a single bounding box within image. | efficientdet/aug/autoaugment.py | apply_bbox_cutout | datawowio/automl | python | def apply_bbox_cutout(image, bboxes, pad_fraction):
random_index = tf.random_uniform(shape=[], maxval=tf.shape(bboxes)[0], dtype=tf.int32)
chosen_bbox = tf.gather(bboxes, random_index)
(mask, mean) = _cutout_inside_bbox(image, chosen_bbox, pad_fraction)
replace = (mean if replace_with_mean else 128)
image = tf.where(tf.equal(mask, 0), tf.cast((tf.ones_like(image, dtype=image.dtype) * replace), dtype=image.dtype), image)
return image |
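Note: the snippet above uses `replace_with_mean`, which is never defined in its scope; in the upstream autoaugment module it is presumably a flag captured from an enclosing function. A minimal standalone sketch that makes the dependency explicit (it assumes the TF1-style API used above and the module's `_cutout_inside_bbox` helper):

```python
import tensorflow.compat.v1 as tf  # matches the tf.random_uniform / tf.where calls above

def apply_bbox_cutout(image, bboxes, pad_fraction, replace_with_mean=False):
    'Applies cutout to a single bounding box within image (standalone sketch).'
    # Pick one bounding box uniformly at random.
    random_index = tf.random_uniform(shape=[], maxval=tf.shape(bboxes)[0], dtype=tf.int32)
    chosen_bbox = tf.gather(bboxes, random_index)
    # _cutout_inside_bbox is assumed to come from the same module as the original.
    mask, mean = _cutout_inside_bbox(image, chosen_bbox, pad_fraction)
    replace = mean if replace_with_mean else 128
    # Zeros in the mask mark the cutout region; overwrite those pixels.
    return tf.where(tf.equal(mask, 0),
                    tf.cast(tf.ones_like(image, dtype=image.dtype) * replace, dtype=image.dtype),
                    image)
```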
@badpenny.periodic_task(seconds=TASK_TIME_OUT)
def cleanup_old_tasks(job_status):
'delete any tracker task if it is older than the time a task can live for.'
session = current_app.db.session('relengapi')
expiry_cutoff = (now() - datetime.timedelta(seconds=TASK_TIME_OUT))
table = tables.ArchiverTask
for tracker in session.query(table).order_by(table.created_at):
if (tracker.created_at < expiry_cutoff):
delete_tracker(tracker)
else:
break | -4,374,379,887,311,869,400 | delete any tracker task if it is older than the time a task can live for. | relengapi/blueprints/archiver/__init__.py | cleanup_old_tasks | lundjordan/build-relengapi | python | @badpenny.periodic_task(seconds=TASK_TIME_OUT)
def cleanup_old_tasks(job_status):
session = current_app.db.session('relengapi')
expiry_cutoff = (now() - datetime.timedelta(seconds=TASK_TIME_OUT))
table = tables.ArchiverTask
for tracker in session.query(table).order_by(table.created_at):
if (tracker.created_at < expiry_cutoff):
delete_tracker(tracker)
else:
break |
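The early `break` above is correct only because the query is ordered by `created_at` ascending: once one tracker is newer than the cutoff, every remaining row must be too. The same pattern in miniature (names here are illustrative, not from the source):

```python
def expire_leading(trackers, cutoff, delete):
    'Delete the leading expired items of an oldest-first sequence.'
    for tracker in trackers:          # trackers sorted oldest-first
        if tracker.created_at < cutoff:
            delete(tracker)
        else:
            break                     # everything after this row is newer
```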
@bp.route('/status/<task_id>')
@api.apimethod(MozharnessArchiveTask, unicode)
def task_status(task_id):
"\n Check and return the current state of the create_and_upload_archive celery task with task id\n of <task_id>.\n\n If the task is unknown, state will be PENDING. Once the task starts it will be updated to\n STARTED and finally, if it completes, it will be either SUCCESS (no exceptions), or FAILURE.\n\n See update_state() within create_and_upload_archive and\n http://celery.readthedocs.org/en/latest/reference/celery.states.html for more details.\n\n If state is SUCCESS, it is safe to check response['s3_urls'] for the archives submitted to s3\n "
task = create_and_upload_archive.AsyncResult(task_id)
task_tracker = tables.ArchiverTask.query.filter((tables.ArchiverTask.task_id == task_id)).first()
log.info('checking status of task id {}: current state {}'.format(task_id, task.state))
task_info = (task.info or {})
response = {'state': task.state}
if (task.state != 'FAILURE'):
response['status'] = task_info.get('status', 'no status available at this point.')
response['src_url'] = task_info.get('src_url', '')
response['s3_urls'] = task_info.get('s3_urls', {})
else:
response['status'] = str(task.info)
response['src_url'] = ''
response['s3_urls'] = {}
if task_tracker:
if (task.state in FINISHED_STATES):
delete_tracker(task_tracker)
elif ((task.state == 'PENDING') and (task_tracker.pending_expires_at < now())):
log.info('Task {} has expired from pending too long. Re-creating task'.format(task.id))
renew_tracker_pending_expiry(task_tracker)
create_and_upload_archive.apply_async(args=[task_tracker.src_url, task_tracker.s3_key], task_id=task.id)
response['state'] = 'RETRY'
response['status'] = 'Task has expired from pending for too long. Re-creating task.'
elif (task_tracker.state != task.state):
update_tracker_state(task_tracker, task.state)
return MozharnessArchiveTask(**response) | 2,060,864,171,519,897,900 | Check and return the current state of the create_and_upload_archive celery task with task id
of <task_id>.
If the task is unknown, state will be PENDING. Once the task starts it will be updated to
STARTED and finally, if it completes, it will be either SUCCESS (no exceptions), or FAILURE.
See update_state() within create_and_upload_archive and
http://celery.readthedocs.org/en/latest/reference/celery.states.html for more details.
If state is SUCCESS, it is safe to check response['s3_urls'] for the archives submitted to s3 | relengapi/blueprints/archiver/__init__.py | task_status | lundjordan/build-relengapi | python | @bp.route('/status/<task_id>')
@api.apimethod(MozharnessArchiveTask, unicode)
def task_status(task_id):
"\n Check and return the current state of the create_and_upload_archive celery task with task id\n of <task_id>.\n\n If the task is unknown, state will be PENDING. Once the task starts it will be updated to\n STARTED and finally, if it completes, it will be either SUCCESS (no exceptions), or FAILURE.\n\n See update_state() within create_and_upload_archive and\n http://celery.readthedocs.org/en/latest/reference/celery.states.html for more details.\n\n If state is SUCCESS, it is safe to check response['s3_urls'] for the archives submitted to s3\n "
task = create_and_upload_archive.AsyncResult(task_id)
task_tracker = tables.ArchiverTask.query.filter((tables.ArchiverTask.task_id == task_id)).first()
log.info('checking status of task id {}: current state {}'.format(task_id, task.state))
task_info = (task.info or {})
response = {'state': task.state}
if (task.state != 'FAILURE'):
response['status'] = task_info.get('status', 'no status available at this point.')
response['src_url'] = task_info.get('src_url', '')
response['s3_urls'] = task_info.get('s3_urls', {})
else:
response['status'] = str(task.info)
response['src_url'] = ''
response['s3_urls'] = {}
if task_tracker:
if (task.state in FINISHED_STATES):
delete_tracker(task_tracker)
elif ((task.state == 'PENDING') and (task_tracker.pending_expires_at < now())):
log.info('Task {} has expired from pending too long. Re-creating task'.format(task.id))
renew_tracker_pending_expiry(task_tracker)
create_and_upload_archive.apply_async(args=[task_tracker.src_url, task_tracker.s3_key], task_id=task.id)
response['state'] = 'RETRY'
response['status'] = 'Task has expired from pending for too long. Re-creating task.'
elif (task_tracker.state != task.state):
update_tracker_state(task_tracker, task.state)
return MozharnessArchiveTask(**response) |
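Clients of this endpoint typically poll until the task leaves a transient state. A hedged sketch (host and URL prefix are assumptions; only the `/status/<task_id>` route is taken from the code above):

```python
import time
import requests

def poll_archiver_task(base_url, task_id, interval=5.0, timeout=300.0):
    'Poll the status endpoint until the task leaves PENDING/STARTED/RETRY.'
    deadline = time.time() + timeout
    while time.time() < deadline:
        body = requests.get('{}/status/{}'.format(base_url, task_id)).json()
        if body['state'] not in ('PENDING', 'STARTED', 'RETRY'):
            return body  # on SUCCESS, body['s3_urls'] holds the archive locations
        time.sleep(interval)
    raise RuntimeError('task {} did not finish within {}s'.format(task_id, timeout))
```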
@bp.route('/hgmo/<path:repo>/<rev>')
@api.apimethod(None, unicode, unicode, unicode, unicode, unicode, status_code=302)
def get_hgmo_archive(repo, rev, subdir=None, suffix='tar.gz', preferred_region=None):
'\n An archiver for hg.mozilla.org related requests. Uses relengapi.blueprints.archiver.get_archive\n\n :param repo: the repo location off of hg.mozilla.org/\n :param rev: the rev associated with the repo\n :param subdir: optional subdir path to only archive a portion of the repo\n :param suffix: the archive extension type. defaulted to tar.gz\n :param preferred_region: the preferred s3 region to use\n '
rev = rev[0:12]
src_url = current_app.config['ARCHIVER_HGMO_URL_TEMPLATE'].format(repo=repo, rev=rev, suffix=suffix, subdir=(subdir or ''))
key = '{repo}-{rev}.{suffix}'.format(repo=repo, rev=rev, suffix=suffix)
if subdir:
key += '/{}'.format(subdir)
return get_archive(src_url, key, preferred_region) | -5,873,509,484,656,335,000 | An archiver for hg.mozilla.org related requests. Uses relengapi.blueprints.archiver.get_archive
:param repo: the repo location off of hg.mozilla.org/
:param rev: the rev associated with the repo
:param subdir: optional subdir path to only archive a portion of the repo
:param suffix: the archive extension type. defaulted to tar.gz
:param preferred_region: the preferred s3 region to use | relengapi/blueprints/archiver/__init__.py | get_hgmo_archive | lundjordan/build-relengapi | python | @bp.route('/hgmo/<path:repo>/<rev>')
@api.apimethod(None, unicode, unicode, unicode, unicode, unicode, status_code=302)
def get_hgmo_archive(repo, rev, subdir=None, suffix='tar.gz', preferred_region=None):
'\n An archiver for hg.mozilla.org related requests. Uses relengapi.blueprints.archiver.get_archive\n\n :param repo: the repo location off of hg.mozilla.org/\n :param rev: the rev associated with the repo\n :param subdir: optional subdir path to only archive a portion of the repo\n :param suffix: the archive extension type. defaulted to tar.gz\n :param preferred_region: the preferred s3 region to use\n '
rev = rev[0:12]
src_url = current_app.config['ARCHIVER_HGMO_URL_TEMPLATE'].format(repo=repo, rev=rev, suffix=suffix, subdir=(subdir or ''))
key = '{repo}-{rev}.{suffix}'.format(repo=repo, rev=rev, suffix=suffix)
if subdir:
key += '/{}'.format(subdir)
return get_archive(src_url, key, preferred_region) |
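To make the template substitution concrete, here is a sketch with a hypothetical `ARCHIVER_HGMO_URL_TEMPLATE`; the real value lives in the Flask app config and may differ:

```python
# Hypothetical template; the actual config value may differ.
TEMPLATE = 'https://hg.mozilla.org/{repo}/archive/{rev}.{suffix}/{subdir}'

repo, rev, suffix, subdir = 'mozilla-central', 'abcdef123456', 'tar.gz', 'testing/mozharness'
src_url = TEMPLATE.format(repo=repo, rev=rev, suffix=suffix, subdir=subdir or '')
key = '{repo}-{rev}.{suffix}'.format(repo=repo, rev=rev, suffix=suffix)
if subdir:
    key += '/{}'.format(subdir)
# src_url -> https://hg.mozilla.org/mozilla-central/archive/abcdef123456.tar.gz/testing/mozharness
# key     -> mozilla-central-abcdef123456.tar.gz/testing/mozharness
```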
def get_archive(src_url, key, preferred_region):
'\n A generic getter for retrieving an s3 location of an archive where the archive is based off a\n src_url.\n\n sub-dir: hg.mozilla.org supports archives of sub directories within a repository. This\n flexibility allows for creating archives of only a portion of what would normally be an entire\n repo archive.\n\n logic flow:\n If their is already a key within s3, a re-direct link is given for the\n s3 location. If the key does not exist, download the archive from src url, upload it to s3\n for each region supported and return all uploaded s3 url locations.\n\n When the key does not exist, the remaining work will be assigned to a celery background task\n with a url location returned immediately for obtaining task state updates.\n '
buckets = current_app.config['ARCHIVER_S3_BUCKETS']
random_region = buckets.keys()[randint(0, (len(buckets.keys()) - 1))]
region = (preferred_region if (preferred_region and (preferred_region in buckets)) else random_region)
bucket = buckets[region]
s3 = current_app.aws.connect_to('s3', region)
session = current_app.db.session('relengapi')
if (not s3.get_bucket(bucket).get_key(key)):
task_id = key.replace('/', '_')
tracker = tables.ArchiverTask.query.filter((tables.ArchiverTask.task_id == task_id)).first()
if (tracker and (tracker.state in FINISHED_STATES)):
log.info('Task tracker: {} exists but finished with state: {}'.format(task_id, tracker.state))
delete_tracker(tracker)
tracker = None
if (not tracker):
log.info('Creating new celery task and task tracker for: {}'.format(task_id))
task = create_and_upload_archive.apply_async(args=[src_url, key], task_id=task_id)
if (task and task.id):
pending_expires_at = (now() + datetime.timedelta(seconds=PENDING_EXPIRES_IN))
session.add(tables.ArchiverTask(task_id=task.id, s3_key=key, created_at=now(), pending_expires_at=pending_expires_at, src_url=src_url, state='PENDING'))
session.commit()
else:
return ({}, 500)
return ({}, 202, {'Location': url_for('archiver.task_status', task_id=task.id)})
log.info('generating GET URL to {}, expires in {}s'.format(key, GET_EXPIRES_IN))
signed_url = s3.generate_url(method='GET', expires_in=GET_EXPIRES_IN, bucket=bucket, key=key)
return redirect(signed_url) | -6,013,747,439,112,581,000 | A generic getter for retrieving an s3 location of an archive where the archive is based off a
src_url.
sub-dir: hg.mozilla.org supports archives of sub directories within a repository. This
flexibility allows for creating archives of only a portion of what would normally be an entire
repo archive.
logic flow:
If there is already a key within s3, a re-direct link is given for the
s3 location. If the key does not exist, download the archive from src url, upload it to s3
for each region supported and return all uploaded s3 url locations.
When the key does not exist, the remaining work will be assigned to a celery background task
with a url location returned immediately for obtaining task state updates. | relengapi/blueprints/archiver/__init__.py | get_archive | lundjordan/build-relengapi | python | def get_archive(src_url, key, preferred_region):
'\n A generic getter for retrieving an s3 location of an archive where the archive is based off a\n src_url.\n\n sub-dir: hg.mozilla.org supports archives of sub directories within a repository. This\n flexibility allows for creating archives of only a portion of what would normally be an entire\n repo archive.\n\n logic flow:\n If their is already a key within s3, a re-direct link is given for the\n s3 location. If the key does not exist, download the archive from src url, upload it to s3\n for each region supported and return all uploaded s3 url locations.\n\n When the key does not exist, the remaining work will be assigned to a celery background task\n with a url location returned immediately for obtaining task state updates.\n '
buckets = current_app.config['ARCHIVER_S3_BUCKETS']
random_region = buckets.keys()[randint(0, (len(buckets.keys()) - 1))]
region = (preferred_region if (preferred_region and (preferred_region in buckets)) else random_region)
bucket = buckets[region]
s3 = current_app.aws.connect_to('s3', region)
session = current_app.db.session('relengapi')
if (not s3.get_bucket(bucket).get_key(key)):
task_id = key.replace('/', '_')
tracker = tables.ArchiverTask.query.filter((tables.ArchiverTask.task_id == task_id)).first()
if (tracker and (tracker.state in FINISHED_STATES)):
log.info('Task tracker: {} exists but finished with state: {}'.format(task_id, tracker.state))
delete_tracker(tracker)
tracker = None
if (not tracker):
log.info('Creating new celery task and task tracker for: {}'.format(task_id))
task = create_and_upload_archive.apply_async(args=[src_url, key], task_id=task_id)
if (task and task.id):
pending_expires_at = (now() + datetime.timedelta(seconds=PENDING_EXPIRES_IN))
session.add(tables.ArchiverTask(task_id=task.id, s3_key=key, created_at=now(), pending_expires_at=pending_expires_at, src_url=src_url, state='PENDING'))
session.commit()
else:
return ({}, 500)
return ({}, 202, {'Location': url_for('archiver.task_status', task_id=task.id)})
log.info('generating GET URL to {}, expires in {}s'.format(key, GET_EXPIRES_IN))
signed_url = s3.generate_url(method='GET', expires_in=GET_EXPIRES_IN, bucket=bucket, key=key)
return redirect(signed_url) |
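Portability note: `buckets.keys()[randint(...)]` indexes the result of `dict.keys()`, which works on Python 2 (where it returns a list, consistent with this repo's era) but raises `TypeError` on Python 3. A Python 3-safe sketch of the region pick:

```python
import random

def pick_region(buckets, preferred_region=None):
    'Choose the preferred S3 region if configured, else a random one.'
    if preferred_region and preferred_region in buckets:
        return preferred_region
    return random.choice(list(buckets))  # list() makes the keys indexable
```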
def test_exists() -> None:
' Program exists '
assert os.path.isfile(PRG) | -4,827,572,837,640,662,000 | Program exists | 09_grph/tests/grph_test.py | test_exists | BioPeterson/biofx_python | python | def test_exists() -> None:
' '
assert os.path.isfile(PRG) |
def test_usage() -> None:
' Usage '
(rv, out) = getstatusoutput(RUN)
assert (rv > 0)
assert out.lower().startswith('usage:') | 9,113,665,449,928,071,000 | Usage | 09_grph/tests/grph_test.py | test_usage | BioPeterson/biofx_python | python | def test_usage() -> None:
' '
(rv, out) = getstatusoutput(RUN)
assert (rv > 0)
assert out.lower().startswith('usage:') |
def test_bad_k() -> None:
' Dies on bad k '
k = random.choice(range((- 10), 1))
(rv, out) = getstatusoutput(f'{RUN} -k {k} {SAMPLE1}')
assert (rv != 0)
assert out.lower().startswith('usage:')
assert re.search(f'-k "{k}" must be > 0', out) | -1,136,513,366,063,022,600 | Dies on bad k | 09_grph/tests/grph_test.py | test_bad_k | BioPeterson/biofx_python | python | def test_bad_k() -> None:
' '
k = random.choice(range((- 10), 1))
(rv, out) = getstatusoutput(f'{RUN} -k {k} {SAMPLE1}')
assert (rv != 0)
assert out.lower().startswith('usage:')
assert re.search(f'-k "{k}" must be > 0', out) |
def test_bad_file() -> None:
' Dies on bad file '
bad = random_string()
(rv, out) = getstatusoutput('{} {}'.format(RUN, bad))
assert (rv != 0)
assert out.lower().startswith('usage:')
assert re.search(f"No such file or directory: '{bad}'", out) | 3,688,796,799,715,212,300 | Dies on bad file | 09_grph/tests/grph_test.py | test_bad_file | BioPeterson/biofx_python | python | def test_bad_file() -> None:
' '
bad = random_string()
(rv, out) = getstatusoutput('{} {}'.format(RUN, bad))
assert (rv != 0)
assert out.lower().startswith('usage:')
assert re.search(f"No such file or directory: '{bad}'", out) |
def run(in_file: str, k: int) -> None:
' Run with args '
out_file = '.'.join([in_file, str(k), 'out'])
assert os.path.isfile(out_file)
expected = open(out_file).read().rstrip()
cmd = '{} -k {} {} | sort'.format(RUN, k, in_file)
(rv, out) = getstatusoutput(cmd)
assert (rv == 0)
assert (out.rstrip() == expected) | 2,782,652,345,295,144,400 | Run with args | 09_grph/tests/grph_test.py | run | BioPeterson/biofx_python | python | def run(in_file: str, k: int) -> None:
' '
out_file = '.'.join([in_file, str(k), 'out'])
assert os.path.isfile(out_file)
expected = open(out_file).read().rstrip()
cmd = '{} -k {} {} | sort'.format(RUN, k, in_file)
(rv, out) = getstatusoutput(cmd)
assert (rv == 0)
assert (out.rstrip() == expected) |
def test_01():
' Runs OK '
run(SAMPLE1, 3) | 4,297,253,423,442,003,500 | Runs OK | 09_grph/tests/grph_test.py | test_01 | BioPeterson/biofx_python | python | def test_01():
' '
run(SAMPLE1, 3) |
def test_02() -> None:
' Runs OK '
run(SAMPLE1, 4) | 8,518,686,450,370,489,000 | Runs OK | 09_grph/tests/grph_test.py | test_02 | BioPeterson/biofx_python | python | def test_02() -> None:
' '
run(SAMPLE1, 4) |
def test_03() -> None:
' Runs OK '
run(SAMPLE1, 5) | 7,497,294,204,659,460,000 | Runs OK | 09_grph/tests/grph_test.py | test_03 | BioPeterson/biofx_python | python | def test_03() -> None:
' '
run(SAMPLE1, 5) |
def test_04() -> None:
' Runs OK '
run(SAMPLE2, 3) | -278,510,080,884,287,400 | Runs OK | 09_grph/tests/grph_test.py | test_04 | BioPeterson/biofx_python | python | def test_04() -> None:
' '
run(SAMPLE2, 3) |
def test_05() -> None:
' Runs OK '
run(SAMPLE2, 4) | -451,182,995,679,305,300 | Runs OK | 09_grph/tests/grph_test.py | test_05 | BioPeterson/biofx_python | python | def test_05() -> None:
' '
run(SAMPLE2, 4) |
def test_06() -> None:
' Runs OK '
run(SAMPLE2, 5) | -4,800,970,842,889,804,000 | Runs OK | 09_grph/tests/grph_test.py | test_06 | BioPeterson/biofx_python | python | def test_06() -> None:
' '
run(SAMPLE2, 5) |
def test_07() -> None:
' Runs OK '
run(SAMPLE3, 3) | 5,923,181,508,090,840,000 | Runs OK | 09_grph/tests/grph_test.py | test_07 | BioPeterson/biofx_python | python | def test_07() -> None:
' '
run(SAMPLE3, 3) |
def test_08() -> None:
' Runs OK '
run(SAMPLE3, 4) | -9,171,916,681,065,659,000 | Runs OK | 09_grph/tests/grph_test.py | test_08 | BioPeterson/biofx_python | python | def test_08() -> None:
' '
run(SAMPLE3, 4) |
def test_09() -> None:
' Runs OK '
run(SAMPLE3, 5) | -4,117,453,768,972,889,000 | Runs OK | 09_grph/tests/grph_test.py | test_09 | BioPeterson/biofx_python | python | def test_09() -> None:
' '
run(SAMPLE3, 5) |
def random_string() -> str:
'Generate a random string'
return ''.join(random.sample((string.ascii_letters + string.digits), k=random.randint(5, 10))) | -4,268,000,723,444,968,400 | Generate a random string | 09_grph/tests/grph_test.py | random_string | BioPeterson/biofx_python | python | def random_string() -> str:
return ''.join(random.sample((string.ascii_letters + string.digits), k=random.randint(5, 10))) |
def create_pipeline() -> pipeline_pb2.Pipeline:
'Creates an async pipeline for testing.'
example_gen = _example_gen().with_id('my_example_gen')
transform = _transform(examples=example_gen.outputs['examples'], a_param=10).with_id('my_transform')
trainer = _trainer(examples=example_gen.outputs['examples'], transform_graph=transform.outputs['transform_graph']).with_id('my_trainer')
pipeline = pipeline_lib.Pipeline(pipeline_name='my_pipeline', pipeline_root='/path/to/root', components=[example_gen, transform, trainer], execution_mode=pipeline_lib.ExecutionMode.ASYNC)
dsl_compiler = compiler.Compiler()
compiled_pipeline: pipeline_pb2.Pipeline = dsl_compiler.compile(pipeline)
trainer = compiled_pipeline.nodes[2].pipeline_node
assert (trainer.node_info.id == 'my_trainer')
for value in trainer.inputs.inputs.values():
value.min_count = 1
return compiled_pipeline | -8,291,859,342,785,953,000 | Creates an async pipeline for testing. | tfx/orchestration/experimental/core/testing/test_async_pipeline.py | create_pipeline | Avnish327030/tfx | python | def create_pipeline() -> pipeline_pb2.Pipeline:
example_gen = _example_gen().with_id('my_example_gen')
transform = _transform(examples=example_gen.outputs['examples'], a_param=10).with_id('my_transform')
trainer = _trainer(examples=example_gen.outputs['examples'], transform_graph=transform.outputs['transform_graph']).with_id('my_trainer')
pipeline = pipeline_lib.Pipeline(pipeline_name='my_pipeline', pipeline_root='/path/to/root', components=[example_gen, transform, trainer], execution_mode=pipeline_lib.ExecutionMode.ASYNC)
dsl_compiler = compiler.Compiler()
compiled_pipeline: pipeline_pb2.Pipeline = dsl_compiler.compile(pipeline)
trainer = compiled_pipeline.nodes[2].pipeline_node
assert (trainer.node_info.id == 'my_trainer')
for value in trainer.inputs.inputs.values():
value.min_count = 1
return compiled_pipeline |
def is_absolute_uri(url: ParseResult) -> bool:
'\n Returns True if the parsed result is an "absolute URI".\n\n We define an "absolute URI" as containing at mimimum a **scheme** and an\n **host** (a.k.a., an authority).\n\n It must contain SH according to the nomenclature defined in this proposal:\n https://gist.github.com/andrewdotn/eebeaa60d48c3c0f6f9fc75f0ede8d03#proposal\n\n Examples of absolute URIs:\n [SH ] https://example.com\n [SHP ] https://example.com/\n [SHPF] https://example.com/foo/cat.gif\n\n What are NOT absolute URIs:\n [ F] cat.gif\n [ P ] /\n [ PF] /foo/cat.gif\n [ HPF] //example.com/foo/cat.gif†\n [S F] https:cat.gif (uncommon)\n [S PF] https:/foo/cat.gif (uncommon)\n\n †: This is called a "network-path reference, and relies on inferring the scheme\n based on an existing base URI. For our purposes, this is not "absolute" enough!\n Source: https://tools.ietf.org/html/rfc3986#section-4.2\n\n '
if (url.scheme and url.netloc):
return True
return False | -397,197,891,575,155,200 | Returns True if the parsed result is an "absolute URI".
We define an "absolute URI" as containing at mimimum a **scheme** and an
**host** (a.k.a., an authority).
It must contain SH according to the nomenclature defined in this proposal:
https://gist.github.com/andrewdotn/eebeaa60d48c3c0f6f9fc75f0ede8d03#proposal
Examples of absolute URIs:
[SH ] https://example.com
[SHP ] https://example.com/
[SHPF] https://example.com/foo/cat.gif
What are NOT absolute URIs:
[ F] cat.gif
[ P ] /
[ PF] /foo/cat.gif
[ HPF] //example.com/foo/cat.gif†
[S F] https:cat.gif (uncommon)
[S PF] https:/foo/cat.gif (uncommon)
†: This is called a "network-path reference", and relies on inferring the scheme
based on an existing base URI. For our purposes, this is not "absolute" enough!
Source: https://tools.ietf.org/html/rfc3986#section-4.2 | src/CreeDictionary/CreeDictionary/templatetags/url_extras.py | is_absolute_uri | Madoshakalaka/morphodict | python | def is_absolute_uri(url: ParseResult) -> bool:
'\n Returns True if the parsed result is an "absolute URI".\n\n We define an "absolute URI" as containing at mimimum a **scheme** and an\n **host** (a.k.a., an authority).\n\n It must contain SH according to the nomenclature defined in this proposal:\n https://gist.github.com/andrewdotn/eebeaa60d48c3c0f6f9fc75f0ede8d03#proposal\n\n Examples of absolute URIs:\n [SH ] https://example.com\n [SHP ] https://example.com/\n [SHPF] https://example.com/foo/cat.gif\n\n What are NOT absolute URIs:\n [ F] cat.gif\n [ P ] /\n [ PF] /foo/cat.gif\n [ HPF] //example.com/foo/cat.gif†\n [S F] https:cat.gif (uncommon)\n [S PF] https:/foo/cat.gif (uncommon)\n\n †: This is called a "network-path reference, and relies on inferring the scheme\n based on an existing base URI. For our purposes, this is not "absolute" enough!\n Source: https://tools.ietf.org/html/rfc3986#section-4.2\n\n '
if (url.scheme and url.netloc):
return True
return False |
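A few doctest-style checks mirroring the docstring's own examples (the function expects an already-parsed `ParseResult`; the `urlparse` import shown is the Python 3 form):

```python
from urllib.parse import urlparse

assert is_absolute_uri(urlparse('https://example.com/foo/cat.gif'))   # [SHPF]
assert not is_absolute_uri(urlparse('/foo/cat.gif'))                  # [ PF]
assert not is_absolute_uri(urlparse('//example.com/foo/cat.gif'))     # network-path reference
assert not is_absolute_uri(urlparse('https:cat.gif'))                 # [S  F], no host
```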
def to_pf_url(url: ParseResult):
'\n Returns *P*ath and *F*ile as defined here:\n https://gist.github.com/andrewdotn/eebeaa60d48c3c0f6f9fc75f0ede8d03#proposal\n '
return urlunparse(url._replace(scheme='', netloc='')) | 249,949,858,676,596,380 | Returns *P*ath and *F*ile as defined here:
https://gist.github.com/andrewdotn/eebeaa60d48c3c0f6f9fc75f0ede8d03#proposal | src/CreeDictionary/CreeDictionary/templatetags/url_extras.py | to_pf_url | Madoshakalaka/morphodict | python | def to_pf_url(url: ParseResult):
'\n Returns *P*ath and *F*ile as defined here:\n https://gist.github.com/andrewdotn/eebeaa60d48c3c0f6f9fc75f0ede8d03#proposal\n '
return urlunparse(url._replace(scheme='', netloc='')) |
@register.tag
def abstatic(parser, token):
'\n Given a relative path to a static asset, return the absolute path to the\n asset.\n\n Derived from: https://github.com/django/django/blob/635d53a86a36cde7866b9caefeb64d809e6bfcd9/django/templatetags/static.py#L143-L159\n '
return AbstaticNode.handle_token(parser, token) | -6,947,781,304,580,890,000 | Given a relative path to a static asset, return the absolute path to the
asset.
Derived from: https://github.com/django/django/blob/635d53a86a36cde7866b9caefeb64d809e6bfcd9/django/templatetags/static.py#L143-L159 | src/CreeDictionary/CreeDictionary/templatetags/url_extras.py | abstatic | Madoshakalaka/morphodict | python | @register.tag
def abstatic(parser, token):
'\n Given a relative path to a static asset, return the absolute path to the\n asset.\n\n Derived from: https://github.com/django/django/blob/635d53a86a36cde7866b9caefeb64d809e6bfcd9/django/templatetags/static.py#L143-L159\n '
return AbstaticNode.handle_token(parser, token) |
def _get_path(self, subpath: str) -> str:
'get full subpath'
return f"engagements/v{(self.options.get('version') or ENGAGEMENTS_API_VERSION)}/{subpath}" | -5,392,205,939,925,482,000 | get full subpath | hubspot3/engagements.py | _get_path | benaduggan/hubspot3 | python | def _get_path(self, subpath: str) -> str:
return f"engagements/v{(self.options.get('version') or ENGAGEMENTS_API_VERSION)}/{subpath}" |
def get(self, engagement_id, **options):
'Get a HubSpot engagement.'
return self._call(f'engagements/{engagement_id}', method='GET', **options) | -5,515,921,273,092,089,000 | Get a HubSpot engagement. | hubspot3/engagements.py | get | benaduggan/hubspot3 | python | def get(self, engagement_id, **options):
return self._call(f'engagements/{engagement_id}', method='GET', **options) |
def get_associated(self, object_type, object_id, **options) -> List[Dict]:
'\n get all engagements associated with the given object\n :param object_type: type of object to get associations on [CONTACT, COMPANY, DEAL]\n :param object_id: ID of the object to get associations on\n '
finished = False
output = []
query_limit = 100
offset = 0
while (not finished):
batch = self._call(f'engagements/associated/{object_type}/{object_id}/paged', method='GET', params={'limit': query_limit, 'offset': offset}, **options)
output.extend(batch['results'])
finished = (not batch['hasMore'])
offset = batch['offset']
return output | 892,432,714,481,501,600 | get all engagements associated with the given object
:param object_type: type of object to get associations on [CONTACT, COMPANY, DEAL]
:param object_id: ID of the object to get associations on | hubspot3/engagements.py | get_associated | benaduggan/hubspot3 | python | def get_associated(self, object_type, object_id, **options) -> List[Dict]:
'\n get all engagements associated with the given object\n :param object_type: type of object to get associations on [CONTACT, COMPANY, DEAL]\n :param object_id: ID of the object to get associations on\n '
finished = False
output = []
query_limit = 100
offset = 0
while (not finished):
batch = self._call(f'engagements/associated/{object_type}/{object_id}/paged', method='GET', params={'limit': query_limit, 'offset': offset}, **options)
output.extend(batch['results'])
finished = (not batch['hasMore'])
offset = batch['offset']
return output |
def get_all(self, **options) -> List[Dict]:
'get all engagements'
finished = False
output = []
query_limit = 250
offset = 0
while (not finished):
batch = self._call('engagements/paged', method='GET', params={'limit': query_limit, 'offset': offset}, **options)
output.extend(batch['results'])
finished = (not batch['hasMore'])
offset = batch['offset']
return output | -8,089,323,220,531,312,000 | get all engagements | hubspot3/engagements.py | get_all | benaduggan/hubspot3 | python | def get_all(self, **options) -> List[Dict]:
finished = False
output = []
query_limit = 250
offset = 0
while (not finished):
batch = self._call('engagements/paged', method='GET', params={'limit': query_limit, 'offset': offset}, **options)
output.extend(batch['results'])
finished = (not batch['hasMore'])
offset = batch['offset']
return output |
def get_recently_modified(self, since, **options) -> List[Dict]:
'get recently modified engagements'
finished = False
output = []
query_limit = 100
offset = 0
while (not finished):
batch = self._call('engagements/recent/modified', method='GET', params={'limit': query_limit, 'offset': offset, 'since': since}, **options)
output.extend(batch['results'])
finished = (not batch['hasMore'])
offset = batch['offset']
return output | 2,372,618,434,490,447,400 | get recently modified engagements | hubspot3/engagements.py | get_recently_modified | benaduggan/hubspot3 | python | def get_recently_modified(self, since, **options) -> List[Dict]:
finished = False
output = []
query_limit = 100
offset = 0
while (not finished):
batch = self._call('engagements/recent/modified', method='GET', params={'limit': query_limit, 'offset': offset, 'since': since}, **options)
output.extend(batch['results'])
finished = (not batch['hasMore'])
offset = batch['offset']
return output |
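All three listing methods above share the same `offset`/`hasMore` pagination loop. Typical client usage might look like the following (client construction and response shape follow the hubspot3 package and HubSpot's engagements API, but verify against your installed version):

```python
from hubspot3 import Hubspot3

client = Hubspot3(api_key='your-api-key')  # hypothetical credentials
# Engagements for one contact; object_type is one of CONTACT, COMPANY, DEAL.
for engagement in client.engagements.get_associated('CONTACT', 12345):
    print(engagement['engagement']['type'])  # shape per HubSpot's API docs
```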
def __init__(self, batch_token=None):
'\n V1ListItemsRequest - a model defined in Swagger\n\n :param dict swaggerTypes: The key is attribute name\n and the value is attribute type.\n :param dict attributeMap: The key is attribute name\n and the value is json key in definition.\n '
self.swagger_types = {'batch_token': 'str'}
self.attribute_map = {'batch_token': 'batch_token'}
self._batch_token = batch_token | 8,804,516,505,985,339,000 | V1ListItemsRequest - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition. | squareconnect/models/v1_list_items_request.py | __init__ | reduceus/connect-python-sdk | python | def __init__(self, batch_token=None):
'\n V1ListItemsRequest - a model defined in Swagger\n\n :param dict swaggerTypes: The key is attribute name\n and the value is attribute type.\n :param dict attributeMap: The key is attribute name\n and the value is json key in definition.\n '
self.swagger_types = {'batch_token': 'str'}
self.attribute_map = {'batch_token': 'batch_token'}
self._batch_token = batch_token |
@property
def batch_token(self):
'\n Gets the batch_token of this V1ListItemsRequest.\n A pagination cursor to retrieve the next set of results for your original query to the endpoint.\n\n :return: The batch_token of this V1ListItemsRequest.\n :rtype: str\n '
return self._batch_token | -8,642,177,761,913,007,000 | Gets the batch_token of this V1ListItemsRequest.
A pagination cursor to retrieve the next set of results for your original query to the endpoint.
:return: The batch_token of this V1ListItemsRequest.
:rtype: str | squareconnect/models/v1_list_items_request.py | batch_token | reduceus/connect-python-sdk | python | @property
def batch_token(self):
'\n Gets the batch_token of this V1ListItemsRequest.\n A pagination cursor to retrieve the next set of results for your original query to the endpoint.\n\n :return: The batch_token of this V1ListItemsRequest.\n :rtype: str\n '
return self._batch_token |
@batch_token.setter
def batch_token(self, batch_token):
'\n Sets the batch_token of this V1ListItemsRequest.\n A pagination cursor to retrieve the next set of results for your original query to the endpoint.\n\n :param batch_token: The batch_token of this V1ListItemsRequest.\n :type: str\n '
self._batch_token = batch_token | -956,991,087,329,217,000 | Sets the batch_token of this V1ListItemsRequest.
A pagination cursor to retrieve the next set of results for your original query to the endpoint.
:param batch_token: The batch_token of this V1ListItemsRequest.
:type: str | squareconnect/models/v1_list_items_request.py | batch_token | reduceus/connect-python-sdk | python | @batch_token.setter
def batch_token(self, batch_token):
'\n Sets the batch_token of this V1ListItemsRequest.\n A pagination cursor to retrieve the next set of results for your original query to the endpoint.\n\n :param batch_token: The batch_token of this V1ListItemsRequest.\n :type: str\n '
self._batch_token = batch_token |
def to_dict(self):
'\n Returns the model properties as a dict\n '
result = {}
for (attr, _) in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result | 2,191,974,537,531,847,000 | Returns the model properties as a dict | squareconnect/models/v1_list_items_request.py | to_dict | reduceus/connect-python-sdk | python | def to_dict(self):
'\n \n '
result = {}
for (attr, _) in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result |
def to_str(self):
'\n Returns the string representation of the model\n '
return pformat(self.to_dict()) | -3,531,024,894,346,511,000 | Returns the string representation of the model | squareconnect/models/v1_list_items_request.py | to_str | reduceus/connect-python-sdk | python | def to_str(self):
'\n \n '
return pformat(self.to_dict()) |
def __repr__(self):
'\n For `print` and `pprint`\n '
return self.to_str() | 5,853,962,500,611,353,000 | For `print` and `pprint` | squareconnect/models/v1_list_items_request.py | __repr__ | reduceus/connect-python-sdk | python | def __repr__(self):
'\n \n '
return self.to_str() |
def __eq__(self, other):
'\n Returns true if both objects are equal\n '
return (self.__dict__ == other.__dict__) | 3,599,733,221,149,238,300 | Returns true if both objects are equal | squareconnect/models/v1_list_items_request.py | __eq__ | reduceus/connect-python-sdk | python | def __eq__(self, other):
'\n \n '
return (self.__dict__ == other.__dict__) |
def __ne__(self, other):
'\n Returns true if both objects are not equal\n '
return (not (self == other)) | 3,600,423,175,817,510,400 | Returns true if both objects are not equal | squareconnect/models/v1_list_items_request.py | __ne__ | reduceus/connect-python-sdk | python | def __ne__(self, other):
'\n \n '
return (not (self == other)) |
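Putting the generated model together: construct, serialize, and compare (a sketch; the class above is self-contained enough for this to run as shown):

```python
req = V1ListItemsRequest(batch_token='cursor-from-previous-page')  # hypothetical token
print(req.to_dict())  # {'batch_token': 'cursor-from-previous-page'}
print(req)            # pformat of the same dict via __repr__/to_str
assert req == V1ListItemsRequest(batch_token='cursor-from-previous-page')
```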
def make_instance(self, include_optional):
'Test Queue\n include_option is a boolean, when False only required\n params are included, when True both required and\n optional params are included '
if include_optional:
return Queue(_class='', items=[openapi_client.models.queue_blocked_item.QueueBlockedItem(_class='', actions=[openapi_client.models.cause_action.CauseAction(_class='', causes=[openapi_client.models.cause_user_id_cause.CauseUserIdCause(_class='', short_description='', user_id='', user_name='')])], blocked=True, buildable=True, id=56, in_queue_since=56, params='', stuck=True, task=openapi_client.models.free_style_project.FreeStyleProject(_class='', name='', url='', color='', description='', display_name='', display_name_or_null='', full_display_name='', full_name='', buildable=True, builds=[openapi_client.models.free_style_build.FreeStyleBuild(_class='', number=56, url='', building=True, description='', display_name='', duration=56, estimated_duration=56, executor='', full_display_name='', id='', keep_log=True, queue_id=56, result='', timestamp=56, built_on='', change_set=openapi_client.models.empty_change_log_set.EmptyChangeLogSet(_class='', kind=''))], first_build=openapi_client.models.free_style_build.FreeStyleBuild(_class='', number=56, url='', building=True, description='', display_name='', duration=56, estimated_duration=56, executor='', full_display_name='', id='', keep_log=True, queue_id=56, result='', timestamp=56, built_on=''), health_report=[openapi_client.models.free_style_projecthealth_report.FreeStyleProjecthealthReport(description='', icon_class_name='', icon_url='', score=56, _class='')], in_queue=True, keep_dependencies=True, last_build=openapi_client.models.free_style_build.FreeStyleBuild(_class='', number=56, url='', building=True, description='', display_name='', duration=56, estimated_duration=56, executor='', full_display_name='', id='', keep_log=True, queue_id=56, result='', timestamp=56, built_on=''), last_completed_build=openapi_client.models.free_style_build.FreeStyleBuild(_class='', number=56, url='', building=True, description='', display_name='', duration=56, estimated_duration=56, executor='', full_display_name='', id='', keep_log=True, queue_id=56, result='', timestamp=56, built_on=''), last_failed_build='', last_stable_build=openapi_client.models.free_style_build.FreeStyleBuild(_class='', number=56, url='', building=True, description='', display_name='', duration=56, estimated_duration=56, executor='', full_display_name='', id='', keep_log=True, queue_id=56, result='', timestamp=56, built_on=''), last_successful_build=openapi_client.models.free_style_build.FreeStyleBuild(_class='', number=56, url='', building=True, description='', display_name='', duration=56, estimated_duration=56, executor='', full_display_name='', id='', keep_log=True, queue_id=56, result='', timestamp=56, built_on=''), last_unstable_build='', last_unsuccessful_build='', next_build_number=56, queue_item='', concurrent_build=True, scm=openapi_client.models.null_scm.NullSCM(_class='')), url='', why='', buildable_start_milliseconds=56)])
else:
return Queue() | -8,577,424,846,062,078,000 | Test Queue
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included | clients/python-legacy/generated/test/test_queue.py | make_instance | cliffano/jenkins-api-clients-generator | python | def make_instance(self, include_optional):
'Test Queue\n include_option is a boolean, when False only required\n params are included, when True both required and\n optional params are included '
if include_optional:
return Queue(_class='', items=[openapi_client.models.queue_blocked_item.QueueBlockedItem(_class='', actions=[openapi_client.models.cause_action.CauseAction(_class='', causes=[openapi_client.models.cause_user_id_cause.CauseUserIdCause(_class='', short_description='', user_id='', user_name='')])], blocked=True, buildable=True, id=56, in_queue_since=56, params='', stuck=True, task=openapi_client.models.free_style_project.FreeStyleProject(_class='', name='', url='', color='', description='', display_name='', display_name_or_null='', full_display_name='', full_name='', buildable=True, builds=[openapi_client.models.free_style_build.FreeStyleBuild(_class='', number=56, url='', building=True, description='', display_name='', duration=56, estimated_duration=56, executor='', full_display_name='', id='', keep_log=True, queue_id=56, result='', timestamp=56, built_on='', change_set=openapi_client.models.empty_change_log_set.EmptyChangeLogSet(_class='', kind=''))], first_build=openapi_client.models.free_style_build.FreeStyleBuild(_class='', number=56, url='', building=True, description='', display_name='', duration=56, estimated_duration=56, executor='', full_display_name='', id='', keep_log=True, queue_id=56, result='', timestamp=56, built_on=''), health_report=[openapi_client.models.free_style_projecthealth_report.FreeStyleProjecthealthReport(description='', icon_class_name='', icon_url='', score=56, _class='')], in_queue=True, keep_dependencies=True, last_build=openapi_client.models.free_style_build.FreeStyleBuild(_class='', number=56, url='', building=True, description='', display_name='', duration=56, estimated_duration=56, executor='', full_display_name='', id='', keep_log=True, queue_id=56, result='', timestamp=56, built_on=''), last_completed_build=openapi_client.models.free_style_build.FreeStyleBuild(_class='', number=56, url='', building=True, description='', display_name='', duration=56, estimated_duration=56, executor='', full_display_name='', id='', keep_log=True, queue_id=56, result='', timestamp=56, built_on=''), last_failed_build='', last_stable_build=openapi_client.models.free_style_build.FreeStyleBuild(_class='', number=56, url='', building=True, description='', display_name='', duration=56, estimated_duration=56, executor='', full_display_name='', id='', keep_log=True, queue_id=56, result='', timestamp=56, built_on=''), last_successful_build=openapi_client.models.free_style_build.FreeStyleBuild(_class='', number=56, url='', building=True, description='', display_name='', duration=56, estimated_duration=56, executor='', full_display_name='', id='', keep_log=True, queue_id=56, result='', timestamp=56, built_on=''), last_unstable_build='', last_unsuccessful_build='', next_build_number=56, queue_item='', concurrent_build=True, scm=openapi_client.models.null_scm.NullSCM(_class='')), url='', why='', buildable_start_milliseconds=56)])
else:
return Queue() |
def testQueue(self):
'Test Queue'
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True) | -1,662,886,051,552,567,000 | Test Queue | clients/python-legacy/generated/test/test_queue.py | testQueue | cliffano/jenkins-api-clients-generator | python | def testQueue(self):
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True) |
def index_of_masked_word(sentence, bert):
"Return index of the masked word in `sentence` using `bert`'s' tokenizer.\n\n We use this function to calculate the linear distance between the target\n and controller as BERT sees it.\n\n Parameters\n ----------\n sentence : str\n\n Returns\n -------\n int\n\n "
tokens = bert.tokenize(sentence)
try:
return tokens.index(MASK)
except ValueError:
return (- 1) | -5,210,766,759,479,877,000 | Return index of the masked word in `sentence` using `bert`'s' tokenizer.
We use this function to calculate the linear distance between the target
and controller as BERT sees it.
Parameters
----------
sentence : str
Returns
-------
int | src/experiment.py | index_of_masked_word | geoffbacon/does-bert-agree | python | def index_of_masked_word(sentence, bert):
"Return index of the masked word in `sentence` using `bert`'s' tokenizer.\n\n We use this function to calculate the linear distance between the target\n and controller as BERT sees it.\n\n Parameters\n ----------\n sentence : str\n\n Returns\n -------\n int\n\n "
tokens = bert.tokenize(sentence)
try:
return tokens.index(MASK)
except ValueError:
return (- 1) |
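A quick illustration with a stand-in for the `bert` wrapper; only a `tokenize` method is needed, and `MASK` is assumed to be the usual `'[MASK]'` token string:

```python
MASK = '[MASK]'  # assumed value of the module-level constant

class FakeBert:
    def tokenize(self, sentence):
        return sentence.split()  # crude stand-in for a WordPiece tokenizer

assert index_of_masked_word('the dog [MASK] loudly', FakeBert()) == 2
assert index_of_masked_word('no mask here', FakeBert()) == -1
```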
def run(language, force_multilingual=False, fold_case=True, gpu=True):
'Run the experiment for `language`.\n\n Parameters\n ----------\n language : str\n force_multilingual : bool\n Whether to use the multilingual model even on English\n fold_case : bool\n Whether to ignore caseing differences after making predictions\n gpu : bool\n Whether to run on GPU or not (useful for debugging)\n\n Returns\n -------\n pd.DataFrame\n\n '
if ((language == 'English') and (not force_multilingual)):
bert = BERT(ENGLISH_MODEL, gpu=gpu)
else:
bert = BERT(MULTILINGUAL_MODEL, gpu=gpu)
vocab = bert.vocab
if fold_case:
vocab = [word.lower() for word in vocab]
code = LANGUAGES[language]
cloze = pd.read_csv(os.path.join(CLOZE_DIR, f'{code}.csv'))
num_examples = (len(cloze) * 2)
print(f'''
Number of examples for {language}: {num_examples}''')
print_every = (num_examples // 100)
features = pd.read_csv(os.path.join(FEATURES_DIR, f'{code}.csv'), dtype={'person': str})
features = features[features['word'].isin(vocab)]
cols = ['number', 'gender', 'case', 'person']
result = []
(count, total) = (0, 0)
for (_, example) in cloze.iterrows():
for mask in ['masked', 'other_masked']:
try:
predictions = bert.predict(example[mask], fold_case)
except ValueError:
continue
predictions = features.merge(predictions, how='left', left_on='word', right_index=True)
predictions = predictions[(predictions['pos'] == example['pos'])]
predictions['correct'] = (predictions[cols] == example[cols]).all(axis=1)
predictions = predictions.groupby('word').agg({'correct': any, 'p': 'first'})
mean = predictions.groupby('correct')['p'].mean()
try:
example['correct'] = mean[True]
except KeyError:
example['correct'] = 0.0
try:
example['incorrect'] = mean[False]
except KeyError:
example['incorrect'] = 0.0
masked_index = index_of_masked_word(example['masked'], bert)
other_index = index_of_masked_word(example['other_masked'], bert)
example['distance'] = abs((masked_index - other_index))
result.append(example)
if (example['correct'] > example['incorrect']):
count += 1
total += 1
if ((total % print_every) == 0):
percent_correct = round((100 * (count / total)), 3)
percent_done = round((100 * (total / num_examples)), 3)
print(f'{percent_correct}% correct with {percent_done}% done')
result = pd.DataFrame(result)
result['right'] = (result['correct'] > result['incorrect'])
file_name = os.path.join(EXPERIMENTS_DIR, f'{code}.csv')
result.to_csv(file_name, index=False)
return result | -3,874,926,264,927,106,000 | Run the experiment for `language`.
Parameters
----------
language : str
force_multilingual : bool
Whether to use the multilingual model even on English
fold_case : bool
Whether to ignore caseing differences after making predictions
gpu : bool
Whether to run on GPU or not (useful for debugging)
Returns
-------
pd.DataFrame | src/experiment.py | run | geoffbacon/does-bert-agree | python | def run(language, force_multilingual=False, fold_case=True, gpu=True):
'Run the experiment for `language`.\n\n Parameters\n ----------\n language : str\n force_multilingual : bool\n Whether to use the multilingual model even on English\n fold_case : bool\n Whether to ignore caseing differences after making predictions\n gpu : bool\n Whether to run on GPU or not (useful for debugging)\n\n Returns\n -------\n pd.DataFrame\n\n '
if ((language == 'English') and (not force_multilingual)):
bert = BERT(ENGLISH_MODEL, gpu=gpu)
else:
bert = BERT(MULTILINGUAL_MODEL, gpu=gpu)
vocab = bert.vocab
if fold_case:
vocab = [word.lower() for word in vocab]
code = LANGUAGES[language]
cloze = pd.read_csv(os.path.join(CLOZE_DIR, f'{code}.csv'))
num_examples = (len(cloze) * 2)
print(f'''
Number of examples for {language}: {num_examples}''')
print_every = (num_examples // 100)
features = pd.read_csv(os.path.join(FEATURES_DIR, f'{code}.csv'), dtype={'person': str})
features = features[features['word'].isin(vocab)]
cols = ['number', 'gender', 'case', 'person']
result = []
(count, total) = (0, 0)
for (_, example) in cloze.iterrows():
for mask in ['masked', 'other_masked']:
try:
predictions = bert.predict(example[mask], fold_case)
except ValueError:
continue
predictions = features.merge(predictions, how='left', left_on='word', right_index=True)
predictions = predictions[(predictions['pos'] == example['pos'])]
predictions['correct'] = (predictions[cols] == example[cols]).all(axis=1)
predictions = predictions.groupby('word').agg({'correct': any, 'p': 'first'})
mean = predictions.groupby('correct')['p'].mean()
try:
example['correct'] = mean[True]
except KeyError:
example['correct'] = 0.0
try:
example['incorrect'] = mean[False]
except KeyError:
example['incorrect'] = 0.0
masked_index = index_of_masked_word(example['masked'], bert)
other_index = index_of_masked_word(example['other_masked'], bert)
example['distance'] = abs((masked_index - other_index))
result.append(example)
if (example['correct'] > example['incorrect']):
count += 1
total += 1
if ((total % print_every) == 0):
percent_correct = round((100 * (count / total)), 3)
percent_done = round((100 * (total / num_examples)), 3)
print(f'{percent_correct}% correct with {percent_done}% done')
result = pd.DataFrame(result)
result['right'] = (result['correct'] > result['incorrect'])
file_name = os.path.join(EXPERIMENTS_DIR, f'{code}.csv')
result.to_csv(file_name, index=False)
return result |
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
'\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n '
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = (cm.astype('float') / cm.sum(axis=1)[:, np.newaxis])
print('Normalized confusion matrix')
else:
print('Confusion matrix, without normalization')
print(cm)
thresh = (cm.max() / 2.0)
for (i, j) in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[(i, j)], horizontalalignment='center', color=('white' if (cm[(i, j)] > thresh) else 'black'))
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label') | -5,036,289,120,716,576,000 | This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`. | TrainValue/multiclass_svm.py | plot_confusion_matrix | xuanthuong/DOU-SI | python | def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
'\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n '
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = (cm.astype('float') / cm.sum(axis=1)[:, np.newaxis])
print('Normalized confusion matrix')
else:
print('Confusion matrix, without normalization')
print(cm)
thresh = (cm.max() / 2.0)
for (i, j) in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[(i, j)], horizontalalignment='center', color=('white' if (cm[(i, j)] > thresh) else 'black'))
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label') |
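Typical usage pairs this helper with `sklearn.metrics.confusion_matrix`; a minimal sketch, assuming the function above (and its `numpy`/`itertools` imports) is in scope:

```python
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

y_true = ['cat', 'dog', 'dog', 'cat', 'bird']
y_pred = ['cat', 'dog', 'cat', 'cat', 'bird']
labels = ['bird', 'cat', 'dog']

cm = confusion_matrix(y_true, y_pred, labels=labels)
plot_confusion_matrix(cm, classes=labels, normalize=True)
plt.show()
```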
def heading_deg(self):
' Calculate heading in degrees of vector from origin '
heading_rad = math.atan2(self.x, self.y)
heading_deg_normalised = ((math.degrees(heading_rad) + 360) % 360)
return heading_deg_normalised | 4,203,066,159,195,690,500 | Calculate heading in degrees of vector from origin | gym_jsbsim/properties.py | heading_deg | songhyonkim/gym-ai-pilot | python | def heading_deg(self):
' '
heading_rad = math.atan2(self.x, self.y)
heading_deg_normalised = ((math.degrees(heading_rad) + 360) % 360)
return heading_deg_normalised |
def heading_deg_to(self, destination: 'GeodeticPosition') -> float:
' Determines heading in degrees of course between self and destination '
difference_vector = (destination - self)
return difference_vector.heading_deg() | 662,026,814,692,946,400 | Determines heading in degrees of course between self and destination | gym_jsbsim/properties.py | heading_deg_to | songhyonkim/gym-ai-pilot | python | def heading_deg_to(self, destination: 'GeodeticPosition') -> float:
' '
difference_vector = (destination - self)
return difference_vector.heading_deg() |
@staticmethod
def from_sim(sim: 'simulation.Simulation') -> 'GeodeticPosition':
' Return a GeodeticPosition object with lat and lon from simulation '
lat_deg = sim[lat_geod_deg]
lon_deg = sim[lng_geoc_deg]
return GeodeticPosition(lat_deg, lon_deg) | 6,759,532,399,933,314,000 | Return a GeodeticPosition object with lat and lon from simulation | gym_jsbsim/properties.py | from_sim | songhyonkim/gym-ai-pilot | python | @staticmethod
def from_sim(sim: 'simulation.Simulation') -> 'GeodeticPosition':
' '
lat_deg = sim[lat_geod_deg]
lon_deg = sim[lng_geoc_deg]
return GeodeticPosition(lat_deg, lon_deg) |
def __sub__(self, other) -> Vector2:
' Returns difference between two coords as (delta_lat, delta_long) '
return Vector2((self.lon - other.lon), (self.lat - other.lat)) | -3,347,112,684,996,750,300 | Returns difference between two coords as (delta_lat, delta_long) | gym_jsbsim/properties.py | __sub__ | songhyonkim/gym-ai-pilot | python | def __sub__(self, other) -> Vector2:
' '
return Vector2((self.lon - other.lon), (self.lat - other.lat)) |
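End to end, the heading between two coordinates comes from subtracting positions and reading the difference vector's heading. A small sketch with made-up coordinates (constructor argument order follows the `from_sim` factory above):

```python
origin = GeodeticPosition(51.5, -0.12)  # hypothetical lat, lon
north = GeodeticPosition(52.5, -0.12)   # due north of origin
east = GeodeticPosition(51.5, 1.0)      # due east of origin
print(origin.heading_deg_to(north))     # ~0.0 degrees
print(origin.heading_deg_to(east))      # ~90.0 degrees
```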
@pytest.fixture
def mock_publish(hass):
'Initialize components.'
(yield hass.loop.run_until_complete(async_mock_mqtt_component(hass))) | 3,935,108,766,096,235,000 | Initialize components. | tests/components/mqtt/test_switch.py | mock_publish | BobbyBleacher/home-assistant | python | @pytest.fixture
def mock_publish(hass):
(yield hass.loop.run_until_complete(async_mock_mqtt_component(hass))) |
async def test_controlling_state_via_topic(hass, mock_publish):
'Test the controlling state via topic.'
assert (await async_setup_component(hass, switch.DOMAIN, {switch.DOMAIN: {'platform': 'mqtt', 'name': 'test', 'state_topic': 'state-topic', 'command_topic': 'command-topic', 'payload_on': 1, 'payload_off': 0}}))
state = hass.states.get('switch.test')
assert (STATE_OFF == state.state)
assert (not state.attributes.get(ATTR_ASSUMED_STATE))
async_fire_mqtt_message(hass, 'state-topic', '1')
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_ON == state.state)
async_fire_mqtt_message(hass, 'state-topic', '0')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_OFF == state.state) | 8,155,222,178,456,217,000 | Test the controlling state via topic. | tests/components/mqtt/test_switch.py | test_controlling_state_via_topic | BobbyBleacher/home-assistant | python | async def test_controlling_state_via_topic(hass, mock_publish):
assert (await async_setup_component(hass, switch.DOMAIN, {switch.DOMAIN: {'platform': 'mqtt', 'name': 'test', 'state_topic': 'state-topic', 'command_topic': 'command-topic', 'payload_on': 1, 'payload_off': 0}}))
state = hass.states.get('switch.test')
assert (STATE_OFF == state.state)
assert (not state.attributes.get(ATTR_ASSUMED_STATE))
async_fire_mqtt_message(hass, 'state-topic', '1')
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_ON == state.state)
async_fire_mqtt_message(hass, 'state-topic', '0')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_OFF == state.state) |
async def test_sending_mqtt_commands_and_optimistic(hass, mock_publish):
'Test the sending MQTT commands in optimistic mode.'
fake_state = ha.State('switch.test', 'on')
with patch('homeassistant.helpers.restore_state.RestoreEntity.async_get_last_state', return_value=mock_coro(fake_state)):
assert (await async_setup_component(hass, switch.DOMAIN, {switch.DOMAIN: {'platform': 'mqtt', 'name': 'test', 'command_topic': 'command-topic', 'payload_on': 'beer on', 'payload_off': 'beer off', 'qos': '2'}}))
state = hass.states.get('switch.test')
assert (STATE_ON == state.state)
assert state.attributes.get(ATTR_ASSUMED_STATE)
common.turn_on(hass, 'switch.test')
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('command-topic', 'beer on', 2, False)
mock_publish.async_publish.reset_mock()
state = hass.states.get('switch.test')
assert (STATE_ON == state.state)
common.turn_off(hass, 'switch.test')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('command-topic', 'beer off', 2, False)
state = hass.states.get('switch.test')
assert (STATE_OFF == state.state) | -1,817,929,223,250,078,200 | Test the sending MQTT commands in optimistic mode. | tests/components/mqtt/test_switch.py | test_sending_mqtt_commands_and_optimistic | BobbyBleacher/home-assistant | python | async def test_sending_mqtt_commands_and_optimistic(hass, mock_publish):
fake_state = ha.State('switch.test', 'on')
with patch('homeassistant.helpers.restore_state.RestoreEntity.async_get_last_state', return_value=mock_coro(fake_state)):
assert (await async_setup_component(hass, switch.DOMAIN, {switch.DOMAIN: {'platform': 'mqtt', 'name': 'test', 'command_topic': 'command-topic', 'payload_on': 'beer on', 'payload_off': 'beer off', 'qos': '2'}}))
state = hass.states.get('switch.test')
assert (STATE_ON == state.state)
assert state.attributes.get(ATTR_ASSUMED_STATE)
common.turn_on(hass, 'switch.test')
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('command-topic', 'beer on', 2, False)
mock_publish.async_publish.reset_mock()
state = hass.states.get('switch.test')
assert (STATE_ON == state.state)
common.turn_off(hass, 'switch.test')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
mock_publish.async_publish.assert_called_once_with('command-topic', 'beer off', 2, False)
state = hass.states.get('switch.test')
assert (STATE_OFF == state.state) |
async def test_controlling_state_via_topic_and_json_message(hass, mock_publish):
'Test the controlling state via topic and JSON message.'
assert (await async_setup_component(hass, switch.DOMAIN, {switch.DOMAIN: {'platform': 'mqtt', 'name': 'test', 'state_topic': 'state-topic', 'command_topic': 'command-topic', 'payload_on': 'beer on', 'payload_off': 'beer off', 'value_template': '{{ value_json.val }}'}}))
state = hass.states.get('switch.test')
assert (STATE_OFF == state.state)
async_fire_mqtt_message(hass, 'state-topic', '{"val":"beer on"}')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_ON == state.state)
async_fire_mqtt_message(hass, 'state-topic', '{"val":"beer off"}')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_OFF == state.state) | -2,006,936,702,478,254,000 | Test the controlling state via topic and JSON message. | tests/components/mqtt/test_switch.py | test_controlling_state_via_topic_and_json_message | BobbyBleacher/home-assistant | python | async def test_controlling_state_via_topic_and_json_message(hass, mock_publish):
assert (await async_setup_component(hass, switch.DOMAIN, {switch.DOMAIN: {'platform': 'mqtt', 'name': 'test', 'state_topic': 'state-topic', 'command_topic': 'command-topic', 'payload_on': 'beer on', 'payload_off': 'beer off', 'value_template': '{{ value_json.val }}'}}))
state = hass.states.get('switch.test')
assert (STATE_OFF == state.state)
async_fire_mqtt_message(hass, 'state-topic', '{"val":"beer on"}')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_ON == state.state)
async_fire_mqtt_message(hass, 'state-topic', '{"val":"beer off"}')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_OFF == state.state) |
async def test_default_availability_payload(hass, mock_publish):
'Test the availability payload.'
assert (await async_setup_component(hass, switch.DOMAIN, {switch.DOMAIN: {'platform': 'mqtt', 'name': 'test', 'state_topic': 'state-topic', 'command_topic': 'command-topic', 'availability_topic': 'availability_topic', 'payload_on': 1, 'payload_off': 0}}))
state = hass.states.get('switch.test')
assert (STATE_UNAVAILABLE == state.state)
async_fire_mqtt_message(hass, 'availability_topic', 'online')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_OFF == state.state)
assert (not state.attributes.get(ATTR_ASSUMED_STATE))
async_fire_mqtt_message(hass, 'availability_topic', 'offline')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_UNAVAILABLE == state.state)
async_fire_mqtt_message(hass, 'state-topic', '1')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_UNAVAILABLE == state.state)
async_fire_mqtt_message(hass, 'availability_topic', 'online')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_ON == state.state) | -3,038,129,219,349,842,400 | Test the availability payload. | tests/components/mqtt/test_switch.py | test_default_availability_payload | BobbyBleacher/home-assistant | python | async def test_default_availability_payload(hass, mock_publish):
assert (await async_setup_component(hass, switch.DOMAIN, {switch.DOMAIN: {'platform': 'mqtt', 'name': 'test', 'state_topic': 'state-topic', 'command_topic': 'command-topic', 'availability_topic': 'availability_topic', 'payload_on': 1, 'payload_off': 0}}))
state = hass.states.get('switch.test')
assert (STATE_UNAVAILABLE == state.state)
async_fire_mqtt_message(hass, 'availability_topic', 'online')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_OFF == state.state)
assert (not state.attributes.get(ATTR_ASSUMED_STATE))
async_fire_mqtt_message(hass, 'availability_topic', 'offline')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_UNAVAILABLE == state.state)
async_fire_mqtt_message(hass, 'state-topic', '1')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_UNAVAILABLE == state.state)
async_fire_mqtt_message(hass, 'availability_topic', 'online')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_ON == state.state) |
async def test_custom_availability_payload(hass, mock_publish):
'Test a custom availability payload.'
assert (await async_setup_component(hass, switch.DOMAIN, {switch.DOMAIN: {'platform': 'mqtt', 'name': 'test', 'state_topic': 'state-topic', 'command_topic': 'command-topic', 'availability_topic': 'availability_topic', 'payload_on': 1, 'payload_off': 0, 'payload_available': 'good', 'payload_not_available': 'nogood'}}))
state = hass.states.get('switch.test')
assert (STATE_UNAVAILABLE == state.state)
async_fire_mqtt_message(hass, 'availability_topic', 'good')
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_OFF == state.state)
assert (not state.attributes.get(ATTR_ASSUMED_STATE))
async_fire_mqtt_message(hass, 'availability_topic', 'nogood')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_UNAVAILABLE == state.state)
async_fire_mqtt_message(hass, 'state-topic', '1')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_UNAVAILABLE == state.state)
async_fire_mqtt_message(hass, 'availability_topic', 'good')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_ON == state.state) | 2,127,276,169,409,135,600 | Test a custom availability payload. | tests/components/mqtt/test_switch.py | test_custom_availability_payload | BobbyBleacher/home-assistant | python | async def test_custom_availability_payload(hass, mock_publish):
assert (await async_setup_component(hass, switch.DOMAIN, {switch.DOMAIN: {'platform': 'mqtt', 'name': 'test', 'state_topic': 'state-topic', 'command_topic': 'command-topic', 'availability_topic': 'availability_topic', 'payload_on': 1, 'payload_off': 0, 'payload_available': 'good', 'payload_not_available': 'nogood'}}))
state = hass.states.get('switch.test')
assert (STATE_UNAVAILABLE == state.state)
async_fire_mqtt_message(hass, 'availability_topic', 'good')
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_OFF == state.state)
assert (not state.attributes.get(ATTR_ASSUMED_STATE))
async_fire_mqtt_message(hass, 'availability_topic', 'nogood')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_UNAVAILABLE == state.state)
async_fire_mqtt_message(hass, 'state-topic', '1')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_UNAVAILABLE == state.state)
async_fire_mqtt_message(hass, 'availability_topic', 'good')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_ON == state.state) |
async def test_custom_state_payload(hass, mock_publish):
'Test the state payload.'
assert (await async_setup_component(hass, switch.DOMAIN, {switch.DOMAIN: {'platform': 'mqtt', 'name': 'test', 'state_topic': 'state-topic', 'command_topic': 'command-topic', 'payload_on': 1, 'payload_off': 0, 'state_on': 'HIGH', 'state_off': 'LOW'}}))
state = hass.states.get('switch.test')
assert (STATE_OFF == state.state)
assert (not state.attributes.get(ATTR_ASSUMED_STATE))
async_fire_mqtt_message(hass, 'state-topic', 'HIGH')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_ON == state.state)
async_fire_mqtt_message(hass, 'state-topic', 'LOW')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_OFF == state.state) | -7,688,893,017,872,148,000 | Test the state payload. | tests/components/mqtt/test_switch.py | test_custom_state_payload | BobbyBleacher/home-assistant | python | async def test_custom_state_payload(hass, mock_publish):
assert (await async_setup_component(hass, switch.DOMAIN, {switch.DOMAIN: {'platform': 'mqtt', 'name': 'test', 'state_topic': 'state-topic', 'command_topic': 'command-topic', 'payload_on': 1, 'payload_off': 0, 'state_on': 'HIGH', 'state_off': 'LOW'}}))
state = hass.states.get('switch.test')
assert (STATE_OFF == state.state)
assert (not state.attributes.get(ATTR_ASSUMED_STATE))
async_fire_mqtt_message(hass, 'state-topic', 'HIGH')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_ON == state.state)
async_fire_mqtt_message(hass, 'state-topic', 'LOW')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (STATE_OFF == state.state) |
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
'Test the setting of attribute via MQTT with JSON payload.'
assert (await async_setup_component(hass, switch.DOMAIN, {switch.DOMAIN: {'platform': 'mqtt', 'name': 'test', 'command_topic': 'test-topic', 'json_attributes_topic': 'attr-topic'}}))
async_fire_mqtt_message(hass, 'attr-topic', '{ "val": "100" }')
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert ('100' == state.attributes.get('val')) | 177,271,805,859,749,000 | Test the setting of attribute via MQTT with JSON payload. | tests/components/mqtt/test_switch.py | test_setting_attribute_via_mqtt_json_message | BobbyBleacher/home-assistant | python | async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
assert (await async_setup_component(hass, switch.DOMAIN, {switch.DOMAIN: {'platform': 'mqtt', 'name': 'test', 'command_topic': 'test-topic', 'json_attributes_topic': 'attr-topic'}}))
async_fire_mqtt_message(hass, 'attr-topic', '{ "val": "100" }')
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert ('100' == state.attributes.get('val')) |
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
'Test that attributes are not extracted when the JSON result is not a dictionary.'
assert (await async_setup_component(hass, switch.DOMAIN, {switch.DOMAIN: {'platform': 'mqtt', 'name': 'test', 'command_topic': 'test-topic', 'json_attributes_topic': 'attr-topic'}}))
async_fire_mqtt_message(hass, 'attr-topic', '[ "list", "of", "things"]')
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (state.attributes.get('val') is None)
assert ('JSON result was not a dictionary' in caplog.text) | -1,280,874,276,873,739,300 | Test that attributes are not extracted when the JSON result is not a dictionary. | tests/components/mqtt/test_switch.py | test_update_with_json_attrs_not_dict | BobbyBleacher/home-assistant | python | async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
assert (await async_setup_component(hass, switch.DOMAIN, {switch.DOMAIN: {'platform': 'mqtt', 'name': 'test', 'command_topic': 'test-topic', 'json_attributes_topic': 'attr-topic'}}))
async_fire_mqtt_message(hass, 'attr-topic', '[ "list", "of", "things"]')
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (state.attributes.get('val') is None)
assert ('JSON result was not a dictionary' in caplog.text) |
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
'Test that attributes are not extracted when the payload is not valid JSON.'
assert (await async_setup_component(hass, switch.DOMAIN, {switch.DOMAIN: {'platform': 'mqtt', 'name': 'test', 'command_topic': 'test-topic', 'json_attributes_topic': 'attr-topic'}}))
async_fire_mqtt_message(hass, 'attr-topic', 'This is not JSON')
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (state.attributes.get('val') is None)
assert ('Erroneous JSON: This is not JSON' in caplog.text) | 586,962,458,147,496,800 | Test that attributes are not extracted when the payload is not valid JSON. | tests/components/mqtt/test_switch.py | test_update_with_json_attrs_bad_JSON | BobbyBleacher/home-assistant | python | async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
assert (await async_setup_component(hass, switch.DOMAIN, {switch.DOMAIN: {'platform': 'mqtt', 'name': 'test', 'command_topic': 'test-topic', 'json_attributes_topic': 'attr-topic'}}))
async_fire_mqtt_message(hass, 'attr-topic', 'This is not JSON')
(await hass.async_block_till_done())
state = hass.states.get('switch.test')
assert (state.attributes.get('val') is None)
assert ('Erroneous JSON: This is not JSON' in caplog.text) |
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
'Test update of discovered MQTTAttributes.'
entry = MockConfigEntry(domain=mqtt.DOMAIN)
(await async_start(hass, 'homeassistant', {}, entry))
data1 = '{ "name": "Beer", "command_topic": "test_topic", "json_attributes_topic": "attr-topic1" }'
data2 = '{ "name": "Beer", "command_topic": "test_topic", "json_attributes_topic": "attr-topic2" }'
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config', data1)
(await hass.async_block_till_done())
async_fire_mqtt_message(hass, 'attr-topic1', '{ "val": "100" }')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.beer')
assert ('100' == state.attributes.get('val'))
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config', data2)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
async_fire_mqtt_message(hass, 'attr-topic1', '{ "val": "50" }')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.beer')
assert ('100' == state.attributes.get('val'))
async_fire_mqtt_message(hass, 'attr-topic2', '{ "val": "75" }')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.beer')
assert ('75' == state.attributes.get('val')) | -5,929,376,502,707,091,000 | Test update of discovered MQTTAttributes. | tests/components/mqtt/test_switch.py | test_discovery_update_attr | BobbyBleacher/home-assistant | python | async def test_discovery_update_attr(hass, mqtt_mock, caplog):
entry = MockConfigEntry(domain=mqtt.DOMAIN)
(await async_start(hass, 'homeassistant', {}, entry))
data1 = '{ "name": "Beer", "command_topic": "test_topic", "json_attributes_topic": "attr-topic1" }'
data2 = '{ "name": "Beer", "command_topic": "test_topic", "json_attributes_topic": "attr-topic2" }'
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config', data1)
(await hass.async_block_till_done())
async_fire_mqtt_message(hass, 'attr-topic1', '{ "val": "100" }')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.beer')
assert ('100' == state.attributes.get('val'))
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config', data2)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
async_fire_mqtt_message(hass, 'attr-topic1', '{ "val": "50" }')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.beer')
assert ('100' == state.attributes.get('val'))
async_fire_mqtt_message(hass, 'attr-topic2', '{ "val": "75" }')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.beer')
assert ('75' == state.attributes.get('val')) |
async def test_unique_id(hass):
'Test unique id option only creates one switch per unique_id.'
(await async_mock_mqtt_component(hass))
assert (await async_setup_component(hass, switch.DOMAIN, {switch.DOMAIN: [{'platform': 'mqtt', 'name': 'Test 1', 'state_topic': 'test-topic', 'command_topic': 'command-topic', 'unique_id': 'TOTALLY_UNIQUE'}, {'platform': 'mqtt', 'name': 'Test 2', 'state_topic': 'test-topic', 'command_topic': 'command-topic', 'unique_id': 'TOTALLY_UNIQUE'}]}))
async_fire_mqtt_message(hass, 'test-topic', 'payload')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
assert (len(hass.states.async_entity_ids()) == 2) | 5,170,561,312,111,095,000 | Test unique id option only creates one switch per unique_id. | tests/components/mqtt/test_switch.py | test_unique_id | BobbyBleacher/home-assistant | python | async def test_unique_id(hass):
(await async_mock_mqtt_component(hass))
assert (await async_setup_component(hass, switch.DOMAIN, {switch.DOMAIN: [{'platform': 'mqtt', 'name': 'Test 1', 'state_topic': 'test-topic', 'command_topic': 'command-topic', 'unique_id': 'TOTALLY_UNIQUE'}, {'platform': 'mqtt', 'name': 'Test 2', 'state_topic': 'test-topic', 'command_topic': 'command-topic', 'unique_id': 'TOTALLY_UNIQUE'}]}))
async_fire_mqtt_message(hass, 'test-topic', 'payload')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
assert (len(hass.states.async_entity_ids()) == 2) |
async def test_discovery_removal_switch(hass, mqtt_mock, caplog):
'Test removal of discovered switch.'
entry = MockConfigEntry(domain=mqtt.DOMAIN)
(await async_start(hass, 'homeassistant', {}, entry))
data = '{ "name": "Beer", "state_topic": "test_topic", "command_topic": "test_topic" }'
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config', data)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.beer')
assert (state is not None)
assert (state.name == 'Beer')
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config', '')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.beer')
assert (state is None) | 1,035,768,021,042,002,600 | Test removal of discovered switch. | tests/components/mqtt/test_switch.py | test_discovery_removal_switch | BobbyBleacher/home-assistant | python | async def test_discovery_removal_switch(hass, mqtt_mock, caplog):
entry = MockConfigEntry(domain=mqtt.DOMAIN)
(await async_start(hass, 'homeassistant', {}, entry))
data = '{ "name": "Beer", "state_topic": "test_topic", "command_topic": "test_topic" }'
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config', data)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.beer')
assert (state is not None)
assert (state.name == 'Beer')
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config', '')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.beer')
assert (state is None) |
async def test_discovery_update_switch(hass, mqtt_mock, caplog):
'Test update of discovered switch.'
entry = MockConfigEntry(domain=mqtt.DOMAIN)
(await async_start(hass, 'homeassistant', {}, entry))
data1 = '{ "name": "Beer", "state_topic": "test_topic", "command_topic": "test_topic" }'
data2 = '{ "name": "Milk", "state_topic": "test_topic", "command_topic": "test_topic" }'
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config', data1)
(await hass.async_block_till_done())
state = hass.states.get('switch.beer')
assert (state is not None)
assert (state.name == 'Beer')
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config', data2)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.beer')
assert (state is not None)
assert (state.name == 'Milk')
state = hass.states.get('switch.milk')
assert (state is None) | -1,301,209,773,686,279,000 | Test update of discovered switch. | tests/components/mqtt/test_switch.py | test_discovery_update_switch | BobbyBleacher/home-assistant | python | async def test_discovery_update_switch(hass, mqtt_mock, caplog):
entry = MockConfigEntry(domain=mqtt.DOMAIN)
(await async_start(hass, 'homeassistant', {}, entry))
data1 = '{ "name": "Beer", "state_topic": "test_topic", "command_topic": "test_topic" }'
data2 = '{ "name": "Milk", "state_topic": "test_topic", "command_topic": "test_topic" }'
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config', data1)
(await hass.async_block_till_done())
state = hass.states.get('switch.beer')
assert (state is not None)
assert (state.name == 'Beer')
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config', data2)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.beer')
assert (state is not None)
assert (state.name == 'Milk')
state = hass.states.get('switch.milk')
assert (state is None) |
async def test_discovery_broken(hass, mqtt_mock, caplog):
'Test handling of bad discovery message.'
entry = MockConfigEntry(domain=mqtt.DOMAIN)
(await async_start(hass, 'homeassistant', {}, entry))
data1 = '{ "name": "Beer" }'
data2 = '{ "name": "Milk", "state_topic": "test_topic", "command_topic": "test_topic" }'
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config', data1)
(await hass.async_block_till_done())
state = hass.states.get('switch.beer')
assert (state is None)
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config', data2)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.milk')
assert (state is not None)
assert (state.name == 'Milk')
state = hass.states.get('switch.beer')
assert (state is None) | 959,381,512,160,867,500 | Test handling of bad discovery message. | tests/components/mqtt/test_switch.py | test_discovery_broken | BobbyBleacher/home-assistant | python | async def test_discovery_broken(hass, mqtt_mock, caplog):
entry = MockConfigEntry(domain=mqtt.DOMAIN)
(await async_start(hass, 'homeassistant', {}, entry))
data1 = '{ "name": "Beer" }'
data2 = '{ "name": "Milk", "state_topic": "test_topic", "command_topic": "test_topic" }'
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config', data1)
(await hass.async_block_till_done())
state = hass.states.get('switch.beer')
assert (state is None)
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config', data2)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.milk')
assert (state is not None)
assert (state.name == 'Milk')
state = hass.states.get('switch.beer')
assert (state is None) |
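The three discovery tests above all exercise the same protocol: Home Assistant listens on homeassistant/<component>/<object_id>/config and creates, updates, or removes the entity according to the JSON payload, with an empty payload triggering removal. As a rough sketch of the publishing side, the snippet below uses paho-mqtt (an assumption; the tests fire messages through the test harness instead) with the same topic and payload shapes the tests use; the broker hostname is a placeholder.

```python
import json

import paho.mqtt.publish as publish  # assumption: paho-mqtt client library is available

# Discovery topic convention exercised above: homeassistant/<component>/<object_id>/config
TOPIC = 'homeassistant/switch/bla/config'

config = {
    'name': 'Beer',
    'state_topic': 'test_topic',
    'command_topic': 'test_topic',
}

# Publishing the config JSON creates (or updates) the switch entity.
publish.single(TOPIC, payload=json.dumps(config), retain=True,
               hostname='localhost')  # hostname is a placeholder

# Publishing an empty payload on the same topic removes the entity,
# which is exactly what test_discovery_removal_switch asserts.
publish.single(TOPIC, payload='', retain=True, hostname='localhost')
```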
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
'Test MQTT switch device registry integration.'
entry = MockConfigEntry(domain=mqtt.DOMAIN)
entry.add_to_hass(hass)
(await async_start(hass, 'homeassistant', {}, entry))
registry = (await hass.helpers.device_registry.async_get_registry())
data = json.dumps({'platform': 'mqtt', 'name': 'Test 1', 'state_topic': 'test-topic', 'command_topic': 'test-command-topic', 'device': {'identifiers': ['helloworld'], 'connections': [['mac', '02:5b:26:a8:dc:12']], 'manufacturer': 'Whatever', 'name': 'Beer', 'model': 'Glass', 'sw_version': '0.1-beta'}, 'unique_id': 'veryunique'})
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config', data)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
device = registry.async_get_device({('mqtt', 'helloworld')}, set())
assert (device is not None)
assert (device.identifiers == {('mqtt', 'helloworld')})
assert (device.connections == {('mac', '02:5b:26:a8:dc:12')})
assert (device.manufacturer == 'Whatever')
assert (device.name == 'Beer')
assert (device.model == 'Glass')
assert (device.sw_version == '0.1-beta') | 9,132,497,107,045,344,000 | Test MQTT switch device registry integration. | tests/components/mqtt/test_switch.py | test_entity_device_info_with_identifier | BobbyBleacher/home-assistant | python | async def test_entity_device_info_with_identifier(hass, mqtt_mock):
entry = MockConfigEntry(domain=mqtt.DOMAIN)
entry.add_to_hass(hass)
(await async_start(hass, 'homeassistant', {}, entry))
registry = (await hass.helpers.device_registry.async_get_registry())
data = json.dumps({'platform': 'mqtt', 'name': 'Test 1', 'state_topic': 'test-topic', 'command_topic': 'test-command-topic', 'device': {'identifiers': ['helloworld'], 'connections': [['mac', '02:5b:26:a8:dc:12']], 'manufacturer': 'Whatever', 'name': 'Beer', 'model': 'Glass', 'sw_version': '0.1-beta'}, 'unique_id': 'veryunique'})
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config', data)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
device = registry.async_get_device({('mqtt', 'helloworld')}, set())
assert (device is not None)
assert (device.identifiers == {('mqtt', 'helloworld')})
assert (device.connections == {('mac', '02:5b:26:a8:dc:12')})
assert (device.manufacturer == 'Whatever')
assert (device.name == 'Beer')
assert (device.model == 'Glass')
assert (device.sw_version == '0.1-beta') |
async def test_entity_device_info_update(hass, mqtt_mock):
'Test device registry update.'
entry = MockConfigEntry(domain=mqtt.DOMAIN)
entry.add_to_hass(hass)
(await async_start(hass, 'homeassistant', {}, entry))
registry = (await hass.helpers.device_registry.async_get_registry())
config = {'platform': 'mqtt', 'name': 'Test 1', 'state_topic': 'test-topic', 'command_topic': 'test-command-topic', 'device': {'identifiers': ['helloworld'], 'connections': [['mac', '02:5b:26:a8:dc:12']], 'manufacturer': 'Whatever', 'name': 'Beer', 'model': 'Glass', 'sw_version': '0.1-beta'}, 'unique_id': 'veryunique'}
data = json.dumps(config)
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config', data)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
device = registry.async_get_device({('mqtt', 'helloworld')}, set())
assert (device is not None)
assert (device.name == 'Beer')
config['device']['name'] = 'Milk'
data = json.dumps(config)
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config', data)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
device = registry.async_get_device({('mqtt', 'helloworld')}, set())
assert (device is not None)
assert (device.name == 'Milk') | 1,981,086,579,315,102,700 | Test device registry update. | tests/components/mqtt/test_switch.py | test_entity_device_info_update | BobbyBleacher/home-assistant | python | async def test_entity_device_info_update(hass, mqtt_mock):
entry = MockConfigEntry(domain=mqtt.DOMAIN)
entry.add_to_hass(hass)
(await async_start(hass, 'homeassistant', {}, entry))
registry = (await hass.helpers.device_registry.async_get_registry())
config = {'platform': 'mqtt', 'name': 'Test 1', 'state_topic': 'test-topic', 'command_topic': 'test-command-topic', 'device': {'identifiers': ['helloworld'], 'connections': [['mac', '02:5b:26:a8:dc:12']], 'manufacturer': 'Whatever', 'name': 'Beer', 'model': 'Glass', 'sw_version': '0.1-beta'}, 'unique_id': 'veryunique'}
data = json.dumps(config)
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config', data)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
device = registry.async_get_device({('mqtt', 'helloworld')}, set())
assert (device is not None)
assert (device.name == 'Beer')
config['device']['name'] = 'Milk'
data = json.dumps(config)
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config', data)
(await hass.async_block_till_done())
(await hass.async_block_till_done())
device = registry.async_get_device({('mqtt', 'helloworld')}, set())
assert (device is not None)
assert (device.name == 'Milk') |
async def test_entity_id_update(hass, mqtt_mock):
'Test MQTT subscriptions are managed when entity_id is updated.'
registry = mock_registry(hass, {})
mock_mqtt = (await async_mock_mqtt_component(hass))
assert (await async_setup_component(hass, switch.DOMAIN, {switch.DOMAIN: [{'platform': 'mqtt', 'name': 'beer', 'state_topic': 'test-topic', 'command_topic': 'command-topic', 'availability_topic': 'avty-topic', 'unique_id': 'TOTALLY_UNIQUE'}]}))
state = hass.states.get('switch.beer')
assert (state is not None)
assert (mock_mqtt.async_subscribe.call_count == 2)
mock_mqtt.async_subscribe.assert_any_call('test-topic', ANY, 0, 'utf-8')
mock_mqtt.async_subscribe.assert_any_call('avty-topic', ANY, 0, 'utf-8')
mock_mqtt.async_subscribe.reset_mock()
registry.async_update_entity('switch.beer', new_entity_id='switch.milk')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.beer')
assert (state is None)
state = hass.states.get('switch.milk')
assert (state is not None)
assert (mock_mqtt.async_subscribe.call_count == 2)
mock_mqtt.async_subscribe.assert_any_call('test-topic', ANY, 0, 'utf-8')
mock_mqtt.async_subscribe.assert_any_call('avty-topic', ANY, 0, 'utf-8') | 2,420,902,222,189,967,000 | Test MQTT subscriptions are managed when entity_id is updated. | tests/components/mqtt/test_switch.py | test_entity_id_update | BobbyBleacher/home-assistant | python | async def test_entity_id_update(hass, mqtt_mock):
registry = mock_registry(hass, {})
mock_mqtt = (await async_mock_mqtt_component(hass))
assert (await async_setup_component(hass, switch.DOMAIN, {switch.DOMAIN: [{'platform': 'mqtt', 'name': 'beer', 'state_topic': 'test-topic', 'command_topic': 'command-topic', 'availability_topic': 'avty-topic', 'unique_id': 'TOTALLY_UNIQUE'}]}))
state = hass.states.get('switch.beer')
assert (state is not None)
assert (mock_mqtt.async_subscribe.call_count == 2)
mock_mqtt.async_subscribe.assert_any_call('test-topic', ANY, 0, 'utf-8')
mock_mqtt.async_subscribe.assert_any_call('avty-topic', ANY, 0, 'utf-8')
mock_mqtt.async_subscribe.reset_mock()
registry.async_update_entity('switch.beer', new_entity_id='switch.milk')
(await hass.async_block_till_done())
(await hass.async_block_till_done())
state = hass.states.get('switch.beer')
assert (state is None)
state = hass.states.get('switch.milk')
assert (state is not None)
assert (mock_mqtt.async_subscribe.call_count == 2)
mock_mqtt.async_subscribe.assert_any_call('test-topic', ANY, 0, 'utf-8')
mock_mqtt.async_subscribe.assert_any_call('avty-topic', ANY, 0, 'utf-8') |
def greedy_feedback(distmat, q_pids, g_pids, positive_indices, negative_indices, inplace=True):
'\n Update positive_indices, negative_indices with one round of feedback. Provide feedback for top-ranked gallery.\n Note that distmat is corrupted if inplace=True.\n\n :param distmat: q x g Tensor (adjusted query to gallery)\n :param q_pids: q\n :param g_pids: g\n :param positive_indices: q x g\n :param negative_indices: q x g\n :return:\n (positive_indices, negative_indices, matches)\n '
(q, g) = tuple(distmat.shape)
if (not inplace):
distmat = distmat.clone().detach()
positive_indices = positive_indices.copy()
negative_indices = negative_indices.copy()
distmat[positive_indices] = float('inf')
distmat[negative_indices] = float('inf')
indices = distmat.argmin(dim=1)
pmap = (g_pids[indices] == q_pids)
positive_q = torch.arange(0, q)[pmap]
negative_q = torch.arange(0, q)[(pmap == False)]
positive_g = indices[pmap]
negative_g = indices[(pmap == False)]
existing = positive_indices[(positive_q, positive_g)]
assert (not existing.any())
positive_indices[(positive_q, positive_g)] = True
existing = negative_indices[(negative_q, negative_g)]
assert (not existing.any())
negative_indices[(negative_q, negative_g)] = True
return (positive_indices, negative_indices, pmap) | 4,714,801,363,259,745,000 | Update positive_indices, negative_indices with one round of feedback. Provide feedback for top-ranked gallery.
Note that distmat is corrupted if inplace=True.
:param distmat: q x g Tensor (adjusted query to gallery)
:param q_pids: q
:param g_pids: g
:param positive_indices: q x g
:param negative_indices: q x g
:return:
(positive_indices, negative_indices, matches) | hitl/feedback.py | greedy_feedback | itsnamgyu/reid-metric | python | def greedy_feedback(distmat, q_pids, g_pids, positive_indices, negative_indices, inplace=True):
'\n Update positive_indices, negative_indices with one round of feedback. Provide feedback for top-ranked gallery.\n Note that distmat is corrupted if inplace=True.\n\n :param distmat: q x g Tensor (adjusted query to gallery)\n :param q_pids: q\n :param g_pids: g\n :param positive_indices: q x g\n :param negative_indices: q x g\n :return:\n (positive_indices, negative_indices, matches)\n '
(q, g) = tuple(distmat.shape)
if (not inplace):
distmat = distmat.clone().detach()
positive_indices = positive_indices.copy()
negative_indices = negative_indices.copy()
distmat[positive_indices] = float('inf')
distmat[negative_indices] = float('inf')
indices = distmat.argmin(dim=1)
pmap = (g_pids[indices] == q_pids)
positive_q = torch.arange(0, q)[pmap]
negative_q = torch.arange(0, q)[(pmap == False)]
positive_g = indices[pmap]
negative_g = indices[(pmap == False)]
existing = positive_indices[(positive_q, positive_g)]
assert (not existing.any())
positive_indices[(positive_q, positive_g)] = True
existing = negative_indices[(negative_q, negative_g)]
assert (not existing.any())
negative_indices[(negative_q, negative_g)] = True
return (positive_indices, negative_indices, pmap) |
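To make the masking semantics concrete, here is a tiny worked example: a sketch assuming torch boolean masks of shape q x g, consistent with the indexing the function performs. With fresh all-False masks, the in-place corruption of distmat is a no-op on the first round.

```python
import torch

# Two queries, three gallery items; smaller distance = better match.
distmat = torch.tensor([[0.1, 0.5, 0.9],
                        [0.8, 0.2, 0.4]])
q_pids = torch.tensor([1, 2])
g_pids = torch.tensor([1, 2, 2])

positive = torch.zeros(2, 3, dtype=torch.bool)
negative = torch.zeros(2, 3, dtype=torch.bool)

positive, negative, matches = greedy_feedback(
    distmat, q_pids, g_pids, positive, negative)

# Each query labels its current top-ranked gallery item:
# query 0 picks gallery 0 (pids 1 == 1), query 1 picks gallery 1 (pids 2 == 2).
print(matches)   # tensor([True, True])
print(positive)  # True at (0, 0) and (1, 1); everything else False
```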
def naive_round(qf, gf, q_pids, g_pids, positive_indices=None, negative_indices=None, inplace=True, previous_distmat=None, device=None):
'\n qf: q x m\n gf: g x m\n q_pids: q\n g_pids: g\n positive_indices: q x g\n negative_indices: q x g\n previous_distmat: adjusted distmat (== compute_distmat(qf, gf) only at init)\n '
(q, g) = (qf.shape[0], gf.shape[0])
assert (qf.shape[1] == gf.shape[1])
if (positive_indices is None):
positive_indices = init_feedback_indices(q, g, device=device)
if (negative_indices is None):
negative_indices = init_feedback_indices(q, g, device=device)
if (previous_distmat is None):
distmat = compute_distmat(qf, gf)
else:
distmat = previous_distmat
res = greedy_feedback(distmat, q_pids, g_pids, positive_indices, negative_indices, inplace=inplace)
(positive_indices, negative_indices, matches) = res
distmat = compute_distmat(qf, gf)
distmat[positive_indices] = 0
distmat[negative_indices] = float('inf')
return (distmat, positive_indices, negative_indices, matches) | -8,445,332,685,900,629,000 | qf: q x m
gf: g x m
q_pids: q
g_pids: g
positive_indices: q x g
negative_indices: q x g
previous_distmat: adjusted distmat (== compute_distmat(qf, gf) only at init) | hitl/feedback.py | naive_round | itsnamgyu/reid-metric | python | def naive_round(qf, gf, q_pids, g_pids, positive_indices=None, negative_indices=None, inplace=True, previous_distmat=None, device=None):
'\n qf: q x m\n gf: g x m\n q_pids: q\n g_pids: g\n positive_indices: q x g\n negative_indices: q x g\n previous_distmat: adjusted distmat (== compute_distmat(qf, gf) only at init)\n '
(q, g) = (qf.shape[0], gf.shape[0])
assert (qf.shape[1] == gf.shape[1])
if (positive_indices is None):
positive_indices = init_feedback_indices(q, g, device=device)
if (negative_indices is None):
negative_indices = init_feedback_indices(q, g, device=device)
if (previous_distmat is None):
distmat = compute_distmat(qf, gf)
else:
distmat = previous_distmat
res = greedy_feedback(distmat, q_pids, g_pids, positive_indices, negative_indices, inplace=inplace)
(positive_indices, negative_indices, matches) = res
distmat = compute_distmat(qf, gf)
distmat[positive_indices] = 0
distmat[negative_indices] = float('inf')
return (distmat, positive_indices, negative_indices, matches) |
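A plausible multi-round driver, assuming the compute_distmat and init_feedback_indices helpers referenced above behave as their names suggest (the features here are synthetic, so the match rate only illustrates the flow):

```python
import torch

torch.manual_seed(0)
qf = torch.randn(4, 16)    # 4 query feature vectors
gf = torch.randn(10, 16)   # 10 gallery feature vectors
q_pids = torch.randint(0, 3, (4,))
g_pids = torch.randint(0, 3, (10,))

distmat, pos, neg = None, None, None
for round_idx in range(3):
    distmat, pos, neg, matches = naive_round(
        qf, gf, q_pids, g_pids,
        positive_indices=pos, negative_indices=neg,
        previous_distmat=distmat)
    # matches[i] reports whether query i's newest labeled item was a true match;
    # labeled positives are pinned to distance 0, negatives pushed to infinity.
    print(round_idx, matches.float().mean().item())
```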
@staticmethod
def _read_image_and_resize(img_entry: Union[(str, 'numpy.array')], img_width: int, img_height: int, should_resize: bool, num_channels: int, resize_method: str, user_specified_num_channels: int):
"\n :param img_entry Union[str, 'numpy.array']: if str file path to the\n image else numpy.array of the image itself\n :param img_width: expected width of the image\n :param img_height: expected height of the image\n :param should_resize: Should the image be resized?\n :param resize_method: type of resizing method\n :param num_channels: expected number of channels in the first image\n :param user_specified_num_channels: did the user specify num channels?\n :return: image object\n\n Helper method to read and resize an image according to model defn.\n If the user doesn't specify a number of channels, we use the first image\n in the dataset as the source of truth. If any image in the dataset\n doesn't have the same number of channels as the first image,\n raise an exception.\n\n If the user specifies a number of channels, we try to convert all the\n images to the specifications by dropping channels/padding 0 channels\n "
img = read_image(img_entry)
img_num_channels = num_channels_in_image(img)
if (img_num_channels == 1):
img = img.reshape((img.shape[0], img.shape[1], 1))
if should_resize:
img = resize_image(img, (img_height, img_width), resize_method)
if (user_specified_num_channels is True):
if ((num_channels == 1) and ((img_num_channels == 3) or (img_num_channels == 4))):
img = greyscale(img)
img_num_channels = 1
img_padded = np.zeros((img_height, img_width, num_channels), dtype=np.uint8)
min_num_channels = min(num_channels, img_num_channels)
img_padded[:, :, :min_num_channels] = img[:, :, :min_num_channels]
img = img_padded
if (img_num_channels != num_channels):
logger.warning('Image has {0} channels, whereas {1} channels are expected. Dropping/adding channels with 0s as appropriate'.format(img_num_channels, num_channels))
elif (img_num_channels != num_channels):
raise ValueError('Image has {0} channels, unlike the first image, which has {1} channels. Make sure all the images have the same number of channels or use the num_channels property in image preprocessing'.format(img_num_channels, num_channels))
if ((img.shape[0] != img_height) or (img.shape[1] != img_width)):
raise ValueError('Images are not of the same size. Expected size is {0}, current image size is {1}. Images are expected to be all of the same size or explicit image width and height are expected to be provided. Additional information: https://ludwig-ai.github.io/ludwig-docs/user_guide/#image-features-preprocessing'.format([img_height, img_width, num_channels], img.shape))
return img | 3,679,320,613,820,813,300 | :param img_entry Union[str, 'numpy.array']: if str file path to the
image else numpy.array of the image itself
:param img_width: expected width of the image
:param img_height: expected height of the image
:param should_resize: Should the image be resized?
:param resize_method: type of resizing method
:param num_channels: expected number of channels in the first image
:param user_specified_num_channels: did the user specify num channels?
:return: image object
Helper method to read and resize an image according to model defn.
If the user doesn't specify a number of channels, we use the first image
in the dataset as the source of truth. If any image in the dataset
doesn't have the same number of channels as the first image,
raise an exception.
If the user specifies a number of channels, we try to convert all the
images to the specifications by dropping channels/padding 0 channels | ludwig/features/image_feature.py | _read_image_and_resize | Yard1/ludwig | python | @staticmethod
def _read_image_and_resize(img_entry: Union[(str, 'numpy.array')], img_width: int, img_height: int, should_resize: bool, num_channels: int, resize_method: str, user_specified_num_channels: int):
"\n :param img_entry Union[str, 'numpy.array']: if str file path to the\n image else numpy.array of the image itself\n :param img_width: expected width of the image\n :param img_height: expected height of the image\n :param should_resize: Should the image be resized?\n :param resize_method: type of resizing method\n :param num_channels: expected number of channels in the first image\n :param user_specified_num_channels: did the user specify num channels?\n :return: image object\n\n Helper method to read and resize an image according to model defn.\n If the user doesn't specify a number of channels, we use the first image\n in the dataset as the source of truth. If any image in the dataset\n doesn't have the same number of channels as the first image,\n raise an exception.\n\n If the user specifies a number of channels, we try to convert all the\n images to the specifications by dropping channels/padding 0 channels\n "
img = read_image(img_entry)
img_num_channels = num_channels_in_image(img)
if (img_num_channels == 1):
img = img.reshape((img.shape[0], img.shape[1], 1))
if should_resize:
img = resize_image(img, (img_height, img_width), resize_method)
if (user_specified_num_channels is True):
if ((num_channels == 1) and ((img_num_channels == 3) or (img_num_channels == 4))):
img = greyscale(img)
img_num_channels = 1
img_padded = np.zeros((img_height, img_width, num_channels), dtype=np.uint8)
min_num_channels = min(num_channels, img_num_channels)
img_padded[:, :, :min_num_channels] = img[:, :, :min_num_channels]
img = img_padded
if (img_num_channels != num_channels):
logger.warning('Image has {0} channels, whereas {1} channels are expected. Dropping/adding channels with 0s as appropriate'.format(img_num_channels, num_channels))
elif (img_num_channels != num_channels):
raise ValueError('Image has {0} channels, unlike the first image, which has {1} channels. Make sure all the images have the same number of channels or use the num_channels property in image preprocessing'.format(img_num_channels, num_channels))
if ((img.shape[0] != img_height) or (img.shape[1] != img_width)):
raise ValueError('Images are not of the same size. Expected size is {0}, current image size is {1}. Images are expected to be all of the same size or explicit image width and height are expected to be provided. Additional information: https://ludwig-ai.github.io/ludwig-docs/user_guide/#image-features-preprocessing'.format([img_height, img_width, num_channels], img.shape))
return img |
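The channel-adjustment branch is easy to check in isolation; the sketch below reproduces just the zero-padding/channel-dropping step with plain numpy (a standalone illustration, not part of Ludwig's API):

```python
import numpy as np

def adjust_channels(img: np.ndarray, num_channels: int) -> np.ndarray:
    """Pad with zero-filled channels or drop extras, mirroring the code above."""
    h, w, img_num_channels = img.shape
    padded = np.zeros((h, w, num_channels), dtype=np.uint8)
    keep = min(num_channels, img_num_channels)
    padded[:, :, :keep] = img[:, :, :keep]
    return padded

rgb = np.random.randint(0, 256, (4, 4, 3), dtype=np.uint8)
print(adjust_channels(rgb, 4).shape)  # (4, 4, 4): a fourth all-zero channel is added
print(adjust_channels(rgb, 1).shape)  # (4, 4, 1): only the first channel is kept
```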
@staticmethod
def _finalize_preprocessing_parameters(preprocessing_parameters: dict, first_img_entry: Union[(str, 'numpy.array')], src_path: str, input_feature_col: np.array):
'\n Helper method to determine the height, width and number of channels for\n preprocessing the image data. This is achieved by looking at the\n parameters provided by the user. When there are some missing parameters,\n we fall back on to the first image in the dataset. The assumption being\n that all the images in the data are expected be of the same size with\n the same number of channels\n '
first_image = read_image(first_img_entry)
first_img_height = first_image.shape[0]
first_img_width = first_image.shape[1]
first_img_num_channels = num_channels_in_image(first_image)
should_resize = False
if ((HEIGHT in preprocessing_parameters) or (WIDTH in preprocessing_parameters)):
should_resize = True
try:
height = int(preprocessing_parameters[HEIGHT])
width = int(preprocessing_parameters[WIDTH])
except ValueError as e:
raise ValueError(('Image height and width must be set and have positive integer values: ' + str(e)))
if ((height <= 0) or (width <= 0)):
raise ValueError('Image height and width must be positive integers')
else:
(height, width) = (first_img_height, first_img_width)
if preprocessing_parameters[INFER_IMAGE_DIMENSIONS]:
should_resize = True
sample_size = min(len(input_feature_col), preprocessing_parameters[INFER_IMAGE_SAMPLE_SIZE])
sample_images = [read_image(get_image_from_path(src_path, img)) for img in input_feature_col[:sample_size]]
if sample_images:
height_avg = min((sum((x.shape[0] for x in sample_images)) / len(sample_images)), preprocessing_parameters[INFER_IMAGE_MAX_HEIGHT])
width_avg = min((sum((x.shape[1] for x in sample_images)) / len(sample_images)), preprocessing_parameters[INFER_IMAGE_MAX_WIDTH])
(height, width) = (round(height_avg), round(width_avg))
logger.debug('Inferring height: {0} and width: {1}'.format(height, width))
else:
logger.warning('Sample set for inference is empty, default to height and width of first image')
if (NUM_CHANNELS in preprocessing_parameters):
user_specified_num_channels = True
num_channels = preprocessing_parameters[NUM_CHANNELS]
else:
user_specified_num_channels = False
num_channels = first_img_num_channels
assert isinstance(num_channels, int), ValueError('Number of image channels needs to be an integer')
return (should_resize, width, height, num_channels, user_specified_num_channels, first_image) | 6,889,128,509,968,942,000 | Helper method to determine the height, width and number of channels for
preprocessing the image data. This is achieved by looking at the
parameters provided by the user. When there are some missing parameters,
we fall back on to the first image in the dataset. The assumption being
that all the images in the data are expected be of the same size with
the same number of channels | ludwig/features/image_feature.py | _finalize_preprocessing_parameters | Yard1/ludwig | python | @staticmethod
def _finalize_preprocessing_parameters(preprocessing_parameters: dict, first_img_entry: Union[(str, 'numpy.array')], src_path: str, input_feature_col: np.array):
'\n Helper method to determine the height, width and number of channels for\n preprocessing the image data. This is achieved by looking at the\n parameters provided by the user. When there are some missing parameters,\n we fall back on to the first image in the dataset. The assumption being\n that all the images in the data are expected be of the same size with\n the same number of channels\n '
first_image = read_image(first_img_entry)
first_img_height = first_image.shape[0]
first_img_width = first_image.shape[1]
first_img_num_channels = num_channels_in_image(first_image)
should_resize = False
if ((HEIGHT in preprocessing_parameters) or (WIDTH in preprocessing_parameters)):
should_resize = True
try:
height = int(preprocessing_parameters[HEIGHT])
width = int(preprocessing_parameters[WIDTH])
except ValueError as e:
raise ValueError(('Image height and width must be set and have positive integer values: ' + str(e)))
if ((height <= 0) or (width <= 0)):
raise ValueError('Image height and width must be positive integers')
else:
(height, width) = (first_img_height, first_img_width)
if preprocessing_parameters[INFER_IMAGE_DIMENSIONS]:
should_resize = True
sample_size = min(len(input_feature_col), preprocessing_parameters[INFER_IMAGE_SAMPLE_SIZE])
sample_images = [read_image(get_image_from_path(src_path, img)) for img in input_feature_col[:sample_size]]
if sample_images:
height_avg = min((sum((x.shape[0] for x in sample_images)) / len(sample_images)), preprocessing_parameters[INFER_IMAGE_MAX_HEIGHT])
width_avg = min((sum((x.shape[1] for x in sample_images)) / len(sample_images)), preprocessing_parameters[INFER_IMAGE_MAX_WIDTH])
(height, width) = (round(height_avg), round(width_avg))
logger.debug('Inferring height: {0} and width: {1}'.format(height, width))
else:
logger.warning('Sample set for inference is empty, default to height and width of first image')
if (NUM_CHANNELS in preprocessing_parameters):
user_specified_num_channels = True
num_channels = preprocessing_parameters[NUM_CHANNELS]
else:
user_specified_num_channels = False
num_channels = first_img_num_channels
assert isinstance(num_channels, int), ValueError('Number of image channels needs to be an integer')
return (should_resize, width, height, num_channels, user_specified_num_channels, first_image) |
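When neither height nor width is configured and INFER_IMAGE_DIMENSIONS is set, the inference reduces to: average the sampled sizes, cap each at the configured maximum, and round. A standalone sketch of that arithmetic (the function name is illustrative, not Ludwig's):

```python
def infer_dimensions(sample_shapes, max_height, max_width):
    """Average sampled (height, width) pairs, capped at the configured maxima."""
    heights = [s[0] for s in sample_shapes]
    widths = [s[1] for s in sample_shapes]
    height = round(min(sum(heights) / len(heights), max_height))
    width = round(min(sum(widths) / len(widths), max_width))
    return height, width

# Three sampled images; the averages are 420 x ~427, so both hit the 224 cap.
print(infer_dimensions([(300, 200), (260, 180), (700, 900)], 224, 224))  # (224, 224)
```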
@lD.log((logBase + '.getAllData'))
def getAllData(logger, query, values=None, dbName=None):
'query data from the database\n \n Query the data over here. If there is a problem with the data, it is going \n to return the value of None, and log the error. Your program needs to check \n whether there was an error with the query by checking for a None return \n value. Note that the location of the databases is assumed to be present\n within the file ``../config/db.json``.\n \n Parameters\n ----------\n logger : {logging.logger}\n logging element \n query : {str}\n The query to be made to the database\n values : {tuple or list-like}, optional\n Additional values to be passed to the query (the default is None)\n dbName : {str or None}, optional\n The name of the database to use. If this is None, the function will \n attempt to read the name from the ``defaultDB`` item within the \n file ``../config/db.json``. \n \n Returns\n -------\n list or None\n A list of tuples containing the values is returned. In case\n there is an error, the error will be logged, and a None will\n be returned\n '
vals = None
try:
db = jsonref.load(open('../config/db.json'))
if ((dbName is None) and ('defaultDB' in db)):
dbName = db['defaultDB']
if (dbName is None):
logger.error('A database name has not been specified.')
return None
conn = sqlite3.connect(db[dbName]['connection'])
cur = conn.cursor()
except Exception as e:
logger.error('Unable to connect to the database')
logger.error(str(e))
return
try:
if (values is None):
cur.execute(query)
else:
cur.execute(query, values)
vals = cur.fetchall()
except Exception as e:
logger.error('Unable to obtain data from the database for:\n query: {}\n values: {}'.format(query, values))
logger.error(str(e))
try:
cur.close()
conn.close()
except Exception as e:
logger.error('Unable to disconnect to the database')
logger.error(str(e))
return
return vals | -2,191,366,141,799,859,500 | query data from the database
Query the data over here. If there is a problem with the data, it is going
to return the value of None, and log the error. Your program needs to check
whether there was an error with the query by checking for a None return
value. Note that the location of the databases is assumed to be present
within the file ``../config/db.json``.
Parameters
----------
logger : {logging.logger}
logging element
query : {str}
The query to be made to the database
values : {tuple or list-like}, optional
Additional values to be passed to the query (the default is None)
dbName : {str or None}, optional
The name of the database to use. If this is None, the function will
attempt to read the name from the ``defaultDB`` item within the
file ``../config/db.json``.
Returns
-------
list or None
A list of tuples containing the values is returned. In case
there is an error, the error will be logged, and a None will
be returned | src/lib/databaseIO/sqLiteIO.py | getAllData | madelinelimm/newcookiectest | python | @lD.log((logBase + '.getAllData'))
def getAllData(logger, query, values=None, dbName=None):
'query data from the database\n \n Query the data over here. If there is a problem with the data, it is going \n to return the value of None, and log the error. Your program needs to check \n whether there was an error with the query by checking for a None return \n value. Note that the location of the databases is assumed to be present\n within the file ``../config/db.json``.\n \n Parameters\n ----------\n logger : {logging.logger}\n logging element \n query : {str}\n The query to be made to the database\n values : {tuple or list-like}, optional\n Additional values to be passed to the query (the default is None)\n dbName : {str or None}, optional\n The name of the database to use. If this is None, the function will \n attempt to read the name from the ``defaultDB`` item within the \n file ``../config/db.json``. \n \n Returns\n -------\n list or None\n A list of tuples containing the values is returned. In case\n there is an error, the error will be logged, and a None will\n be returned\n '
vals = None
try:
db = jsonref.load(open('../config/db.json'))
if ((dbName is None) and ('defaultDB' in db)):
dbName = db['defaultDB']
if (dbName is None):
logger.error('A database name has not been specified.')
return None
conn = sqlite3.connect(db[dbName]['connection'])
cur = conn.cursor()
except Exception as e:
logger.error('Unable to connect to the database')
logger.error(str(e))
return
try:
if (values is None):
cur.execute(query)
else:
cur.execute(query, values)
vals = cur.fetchall()
except Exception as e:
logger.error('Unable to obtain data from the database for:\n query: {}\n values: {}'.format(query, values))
logger.error(str(e))
try:
cur.close()
conn.close()
except Exception as e:
logger.error('Unable to disconnect to the database')
logger.error(str(e))
return
return vals |
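A usage sketch for getAllData, assuming ../config/db.json is shaped the way the reads above imply (a defaultDB key naming an entry whose connection field is a SQLite path) and that the @lD.log decorator injects the logger argument so callers omit it; the table and columns are hypothetical:

```python
# ../config/db.json (layout inferred from the code above):
# {
#     "defaultDB": "main",
#     "main": {"connection": "/path/to/data.db"}
# }

rows = getAllData('SELECT id, name FROM users WHERE age > ?', values=(21,))
if rows is None:
    print('query failed; details are in the log')  # errors surface as None
else:
    for row in rows:
        print(row)
```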
@lD.log((logBase + '.getDataIterator'))
def getDataIterator(logger, query, values=None, chunks=100, dbName=None):
'Create an iterator from a largish query\n \n This is a generator that returns values in chunks of chunksize ``chunks``.\n \n Parameters\n ----------\n logger : {logging.logger}\n logging element \n query : {str}\n The query to be made to the database\n values : {tuple or list-like}, optional\n Additional values to be passed to the query (the default \n is None)\n chunks : {number}, optional\n This is the number of rows that the data is going to return at every call\n of __next__() to this function. (the default is 100)\n dbName : {str or None}, optional\n The name of the database to use. If this is None, the function will \n attempt to read the name from the ``defaultDB`` item within the \n file ``../config/db.json``. \n \n Yields\n ------\n list of tuples\n A list of tuples from the query, with a maximum of ``chunks`` tuples returned\n at one time. \n '
try:
db = jsonref.load(open('../config/db.json'))
if ((dbName is None) and ('defaultDB' in db)):
dbName = db['defaultDB']
if (dbName is None):
logger.error('A database name has not been specified.')
return None
conn = sqlite3.connect(db[dbName]['connection'])
cur = conn.cursor()
except Exception as e:
logger.error('Unable to connect to the database')
logger.error(str(e))
return
try:
if (values is None):
cur.execute(query)
else:
cur.execute(query, values)
while True:
vals = cur.fetchmany(chunks)
if (len(vals) == 0):
break
(yield vals)
except Exception as e:
logger.error('Unable to obtain data from the database for:\n query: {}\n values: {}'.format(query, values))
logger.error(str(e))
try:
conn.close()
except Exception as e:
logger.error('Unable to disconnect to the database')
logger.error(str(e))
return
return | 4,075,178,327,135,300,000 | Create an iterator from a largish query
This is a generator that returns values in chunks of chunksize ``chunks``.
Parameters
----------
logger : {logging.logger}
logging element
query : {str}
The query to be made to the database
values : {tuple or list-like}, optional
Additional values to be passed to the query (the default
is None)
chunks : {number}, optional
This is the number of rows that the data is going to return at every call
of __next__() to this function. (the default is 100)
dbName : {str or None}, optional
The name of the database to use. If this is None, the function will
attempt to read the name from the ``defaultDB`` item within the
file ``../config/db.json``.
Yields
------
list of tuples
A list of tuples from the query, with a maximum of ``chunks`` tuples returned
at one time. | src/lib/databaseIO/sqLiteIO.py | getDataIterator | madelinelimm/newcookiectest | python | @lD.log((logBase + '.getDataIterator'))
def getDataIterator(logger, query, values=None, chunks=100, dbName=None):
'Create an iterator from a largish query\n \n This is a generator that returns values in chunks of chunksize ``chunks``.\n \n Parameters\n ----------\n logger : {logging.logger}\n logging element \n query : {str}\n The query to be made to the database\n values : {tuple or list-like}, optional\n Additional values to be passed to the query (the default \n is None)\n chunks : {number}, optional\n This is the number of rows that the data is going to return at every call\n of __next__() to this function. (the default is 100)\n dbName : {str or None}, optional\n The name of the database to use. If this is None, the function will \n attempt to read the name from the ``defaultDB`` item within the \n file ``../config/db.json``. \n \n Yields\n ------\n list of tuples\n A list of tuples from the query, with a maximum of ``chunks`` tuples returned\n at one time. \n '
try:
db = jsonref.load(open('../config/db.json'))
if ((dbName is None) and ('defaultDB' in db)):
dbName = db['defaultDB']
if (dbName is None):
logger.error('A database name has not been specified.')
return None
conn = sqlite3.connect(db[dbName]['connection'])
cur = conn.cursor()
except Exception as e:
logger.error('Unable to connect to the database')
logger.error(str(e))
return
try:
if (values is None):
cur.execute(query)
else:
cur.execute(query, values)
while True:
vals = cur.fetchmany(chunks)
if (len(vals) == 0):
break
(yield vals)
except Exception as e:
logger.error('Unable to obtain data from the database for:\nquery: {}\nvalues: {}'.format(query, values))
logger.error(str(e))
try:
conn.close()
except Exception as e:
logger.error('Unable to disconnect from the database')
logger.error(str(e))
return
return |
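A minimal usage sketch for getDataIterator, assuming a 'testDB' entry exists in ../config/db.json and a hypothetical users table; the lD.log decorator is taken to leave the explicit logger argument in place, as the signature above suggests:

import logging

logger = logging.getLogger('sqLiteIO.example')
query = 'SELECT id, name FROM users WHERE active = ?'

# Each iteration yields a list of at most `chunks` row tuples, so memory
# stays bounded even when the full result set is large.
for chunk in getDataIterator(logger, query, values=(1,), chunks=100, dbName='testDB'):
    for row in chunk:
        print(row)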
@lD.log((logBase + '.getSingleDataIterator'))
def getSingleDataIterator(logger, query, values=None, dbName=None):
'Create an iterator from a largish query\n \n This is a generator that returns rows one at a time.\n \n Parameters\n ----------\n logger : {logging.logger}\n logging element\n query : {str}\n The query to be made to the database\n values : {tuple or list-like}, optional\n Additional values to be passed to the query (the default \n is None)\n dbName : {str or None}, optional\n The name of the database to use. If this is None, the function will \n attempt to read the name from the ``defaultDB`` item within the \n file ``../config/db.json``. \n \n Yields\n ------\n tuple\n A single tuple (row) from the query, one row returned at a time. \n '
try:
db = jsonref.load(open('../config/db.json'))
if ((dbName is None) and ('defaultDB' in db)):
dbName = db['defaultDB']
if (dbName is None):
logger.error('A database name has not been specified.')
return None
conn = sqlite3.connect(db[dbName]['connection'])
cur = conn.cursor()
except Exception as e:
logger.error('Unable to connect to the database')
logger.error(str(e))
return
try:
if (values is None):
cur.execute(query)
else:
cur.execute(query, values)
while True:
vals = cur.fetchone()
if (vals is None):
break
(yield vals)
except Exception as e:
logger.error('Unable to obtain data from the database for:\nquery: {}\nvalues: {}'.format(query, values))
logger.error(str(e))
try:
conn.close()
except Exception as e:
logger.error('Unable to disconnect from the database')
logger.error(str(e))
return
return | 1,734,528,024,462,529,000 | Create an iterator from a largish query
This is a generator that returns rows one at a time.
Parameters
----------
logger : {logging.logger}
logging element
query : {str}
The query to be made to the database
values : {tuple or list-like}, optional
Additional values to be passed to the query (the default
is None)
dbName : {str or None}, optional
The name of the database to use. If this is None, the function will
attempt to read the name from the ``defaultDB`` item within the
file ``../config/db.json``.
Yields
------
tuple
A single tuple (row) from the query, one row returned at a time. | src/lib/databaseIO/sqLiteIO.py | getSingleDataIterator | madelinelimm/newcookiectest | python | @lD.log((logBase + '.getSingleDataIterator'))
def getSingleDataIterator(logger, query, values=None, dbName=None):
'Create an iterator from a largish query\n \n This is a generator that returns rows one at a time.\n \n Parameters\n ----------\n logger : {logging.logger}\n logging element\n query : {str}\n The query to be made to the database\n values : {tuple or list-like}, optional\n Additional values to be passed to the query (the default \n is None)\n dbName : {str or None}, optional\n The name of the database to use. If this is None, the function will \n attempt to read the name from the ``defaultDB`` item within the \n file ``../config/db.json``. \n \n Yields\n ------\n tuple\n A single tuple (row) from the query, one row returned at a time. \n '
try:
db = jsonref.load(open('../config/db.json'))
if ((dbName is None) and ('defaultDB' in db)):
dbName = db['defaultDB']
if (dbName is None):
logger.error('A database name has not been specified.')
return None
conn = sqlite3.connect(db[dbName]['connection'])
cur = conn.cursor()
except Exception as e:
logger.error('Unable to connect to the database')
logger.error(str(e))
return
try:
if (values is None):
cur.execute(query)
else:
cur.execute(query, values)
while True:
vals = cur.fetchone()
if (vals is None):
break
(yield vals)
except Exception as e:
logger.error('Unable to obtain data from the database for:\nquery: {}\nvalues: {}'.format(query, values))
logger.error(str(e))
try:
conn.close()
except Exception as e:
logger.error('Unable to disconnect from the database')
logger.error(str(e))
return
return |
@lD.log((logBase + '.commitData'))
def commitData(logger, query, values=None, dbName=None):
'Execute and commit a query against the database\n \n If there is a problem with the query, the function returns\n ``None`` and logs the error. Your program needs to check for\n a ``None`` return value to detect that an error occurred.\n \n Parameters\n ----------\n logger : {logging.logger}\n logging element\n query : {str}\n The query to be made to the database\n values : {tuple or list-like}, optional\n Additional values to be passed to the query (the default \n is None)\n dbName : {str or None}, optional\n The name of the database to use. If this is None, the function will \n attempt to read the name from the ``defaultDB`` item within the \n file ``../config/db.json``. \n \n Returns\n -------\n True or None\n On successful completion, ``True`` is returned. In case\n there is an error, the error will be logged, and ``None`` will\n be returned\n '
vals = True
try:
db = jsonref.load(open('../config/db.json'))
if ((dbName is None) and ('defaultDB' in db)):
dbName = db['defaultDB']
if (dbName is None):
logger.error('A database name has not been specified.')
return None
conn = sqlite3.connect(db[dbName]['connection'])
cur = conn.cursor()
except Exception as e:
logger.error('Unable to connect to the database')
logger.error(str(e))
return None
try:
if (values is None):
cur.execute(query)
else:
cur.execute(query, values)
except Exception as e:
logger.error('Unable to obtain data from the database for:\nquery: {}\nvalues: {}'.format(query, values))
logger.error(str(e))
vals = None
try:
conn.commit()
cur.close()
conn.close()
except Exception as e:
logger.error('Unable to disconnect from the database')
logger.error(str(e))
return
return vals | 5,148,623,577,417,340,000 | Execute and commit a query against the database
If there is a problem with the query, the function returns
``None`` and logs the error. Your program needs to check for
a ``None`` return value to detect that an error occurred.
Parameters
----------
logger : {logging.logger}
logging element
query : {str}
The query to be made to the database
values : {tuple or list-like}, optional
Additional values to be passed to the query (the default
is None)
dbName : {str or None}, optional
The name of the database to use. If this is None, the function will
attempt to read the name from the ``defaultDB`` item within the
file ``../config/db.json``.
Returns
-------
True or None
On successful completion, a ``True`` is returned. In case
there is an error, the error will be logged, and a ``None`` will
be returned | src/lib/databaseIO/sqLiteIO.py | commitData | madelinelimm/newcookiectest | python | @lD.log((logBase + '.commitData'))
def commitData(logger, query, values=None, dbName=None):
'Execute and commit a query against the database\n \n If there is a problem with the query, the function returns\n ``None`` and logs the error. Your program needs to check for\n a ``None`` return value to detect that an error occurred.\n \n Parameters\n ----------\n logger : {logging.logger}\n logging element\n query : {str}\n The query to be made to the database\n values : {tuple or list-like}, optional\n Additional values to be passed to the query (the default \n is None)\n dbName : {str or None}, optional\n The name of the database to use. If this is None, the function will \n attempt to read the name from the ``defaultDB`` item within the \n file ``../config/db.json``. \n \n Returns\n -------\n True or None\n On successful completion, ``True`` is returned. In case\n there is an error, the error will be logged, and ``None`` will\n be returned\n '
vals = True
try:
db = jsonref.load(open('../config/db.json'))
if ((dbName is None) and ('defaultDB' in db)):
dbName = db['defaultDB']
if (dbName is None):
logger.error('A database name has not been specified.')
return None
conn = sqlite3.connect(db[dbName]['connection'])
cur = conn.cursor()
except Exception as e:
logger.error('Unable to connect to the database')
logger.error(str(e))
return None
try:
if (values is None):
cur.execute(query)
else:
cur.execute(query, values)
except Exception as e:
logger.error('Unable to obtain data from the database for:\nquery: {}\nvalues: {}'.format(query, values))
logger.error(str(e))
vals = None
try:
conn.commit()
cur.close()
conn.close()
except Exception as e:
logger.error('Unable to disconnect from the database')
logger.error(str(e))
return
return vals |
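A short sketch of the error-handling convention commitData documents: failures surface as a None return rather than a raised exception, so the caller must test the result explicitly. Table, column, and database names here are hypothetical:

# Runs the statement and commits; returns True on success, None on failure.
result = commitData(logger, 'UPDATE users SET active = ? WHERE id = ?', values=(0, 42), dbName='testDB')
if result is None:
    logger.error('update failed; see the earlier log entries for the cause')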
@lD.log((logBase + '.commitDataList'))
def commitDataList(logger, query, values, dbName=None):
'Execute and commit a query for a list of value tuples\n \n The query is run once per value tuple via ``executemany``. If there\n is a problem, the function returns ``None`` and logs the error. Your\n program needs to check for a ``None`` return value to detect that an\n error occurred.\n \n Parameters\n ----------\n logger : {logging.logger}\n logging element\n query : {str}\n The query to be made to the database\n values : {list of tuples}\n Value tuples to be passed to the query, one per execution\n dbName : {str or None}, optional\n The name of the database to use. If this is None, the function will \n attempt to read the name from the ``defaultDB`` item within the \n file ``../config/db.json``. \n \n Returns\n -------\n True or None\n A successful completion of this function returns ``True``. \n In case there is an error, the error will be logged, and ``None`` will\n be returned\n '
val = True
try:
db = jsonref.load(open('../config/db.json'))
if ((dbName is None) and ('defaultDB' in db)):
dbName = db['defaultDB']
if (dbName is None):
logger.error('A database name has not been specified.')
return None
conn = sqlite3.connect(db[dbName]['connection'])
cur = conn.cursor()
except Exception as e:
logger.error('Unable to connect to the database')
logger.error(str(e))
return None
try:
cur.executemany(query, values)
except Exception as e:
logger.error('Unable to execute query for:\nquery: {}\nvalues: {}'.format(query, values))
logger.error(str(e))
val = None
try:
conn.commit()
cur.close()
conn.close()
except Exception as e:
logger.error('Unable to disconnect from the database')
logger.error(str(e))
return None
return val | 4,836,143,481,288,761,000 | Execute and commit a query for a list of value tuples
The query is run once per value tuple via ``executemany``. If there
is a problem, the function returns ``None`` and logs the error. Your
program needs to check for a ``None`` return value to detect that an
error occurred.
Parameters
----------
logger : {logging.logger}
logging element
query : {str}
The query to be made to the database
values : {list of tuples}
Value tuples to be passed to the query, one per execution
dbName : {str or None}, optional
The name of the database to use. If this is None, the function will
attempt to read the name from the ``defaultDB`` item within the
file ``../config/db.json``.
Returns
-------
True or None
A successful completion of this function returns a ``True``.
In case there is an error, the error will be logged, and a ``None`` will
be returned | src/lib/databaseIO/sqLiteIO.py | commitDataList | madelinelimm/newcookiectest | python | @lD.log((logBase + '.commitDataList'))
def commitDataList(logger, query, values, dbName=None):
'Execute and commit a query for a list of value tuples\n \n The query is run once per value tuple via ``executemany``. If there\n is a problem, the function returns ``None`` and logs the error. Your\n program needs to check for a ``None`` return value to detect that an\n error occurred.\n \n Parameters\n ----------\n logger : {logging.logger}\n logging element\n query : {str}\n The query to be made to the database\n values : {list of tuples}\n Value tuples to be passed to the query, one per execution\n dbName : {str or None}, optional\n The name of the database to use. If this is None, the function will \n attempt to read the name from the ``defaultDB`` item within the \n file ``../config/db.json``. \n \n Returns\n -------\n True or None\n A successful completion of this function returns ``True``. \n In case there is an error, the error will be logged, and ``None`` will\n be returned\n '
val = True
try:
db = jsonref.load(open('../config/db.json'))
if ((dbName is None) and ('defaultDB' in db)):
dbName = db['defaultDB']
if (dbName is None):
logger.error('A database name has not been specified.')
return None
conn = sqlite3.connect(db[dbName]['connection'])
cur = conn.cursor()
except Exception as e:
logger.error('Unable to connect to the database')
logger.error(str(e))
return None
try:
cur.executemany(query, values)
except Exception as e:
logger.error('Unable to execute query for:\nquery: {}\nvalues: {}'.format(query, values))
logger.error(str(e))
val = None
try:
conn.commit()
cur.close()
conn.close()
except Exception as e:
logger.error('Unable to disconnect from the database')
logger.error(str(e))
return None
return val |
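The bulk variant follows the same convention but routes through cursor.executemany, so values must be a sequence of parameter tuples, one per row. A hypothetical bulk insert:

rows = [(1, 'alice'), (2, 'bob'), (3, 'carol')]
ok = commitDataList(logger, 'INSERT INTO users (id, name) VALUES (?, ?)', rows, dbName='testDB')
if ok is None:
    logger.error('bulk insert failed')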
def a_vector_OLS_and_LP(m_dict, bounds, boundedness, term_limit, term_lower_bound, fit_method, alpha, diff_error=0.001, diff_step=0.001):
" Main workhorse function of pymetalog package.\n Called during metalog.__init__ method call.\n\n Args:\n m_dict (:obj:`dict` with keys ['params', 'dataValues', 'Y']): Initialized output_dict variable from metalog class.\n - m_dict['params']: (:obj:`dict` with keys ['bounds', 'boundedness', 'term_limit', 'term_lower_bound', 'step_len', 'fit_method']):\n * 'bounds': metalog.bounds\n * 'boundedness': metalog.boundedness\n * 'term_limit': metalog.term_limit\n * 'term_lower_bound': metalog.term_lower_bound\n * 'step_len': metalog.step_len\n * 'fit_method': metalog.fit_method\n\n - m_dict['dataValues']: (:obj:`pandas.DataFrame` with columns ['x','probs','z'] of type numeric):\n * 'x': metalog.x\n * 'probs': metalog.probs\n * 'z': column calculated in metalog.append_zvector method\n - depends on metalog.boundedness attribute\n - metalog.boundedness = 'u':\n * 'z' = metalog.x\n - metalog.boundedness = 'sl':\n * 'z' = log( (metalog.x-lower_bound) )\n - metalog.boundedness = 'su':\n * 'z' = = log( (upper_bound-metalog.x) )\n - metalog.boundedness = 'b':\n * 'z' = log( (metalog.x-lower_bound) / (upper_bound-metalog.x) )\n\n - m_dict['Y']: (:obj:`pandas.DataFrame` with columns ['y1','y2','y3','y4', ... ,'yn'] of type numeric):\n * 'y1': numpy.array of ones with length equal to len(x)\n * 'y2': numpy.array of numeric values equal to the term attached to s in the logistic quantile function np.log(m_dict['dataValues']['probs'] / (1 - m_dict['dataValues']['probs']))\n * 'y3': numpy.array of numeric values (m_dict['dataValues']['probs'] - 0.5) * m_dict['Y']['y2']\n * 'y4': numpy.array of numeric values m_dict['Y']['y4'] = m_dict['dataValues']['probs'] - 0.5\n * 'yn': numpy.array of numeric values:\n - if n in 'yn' is odd,\n m_dict['Y']['yn'] = m_dict['Y']['y4']**(int(i//2))\n - if n in 'yn' is even,\n zn = 'y' + str(n-1)\n m_dict['Y'][yn] = m_dict['Y']['y2'] * m_dict['Y'][zn]\n\n bounds (:obj:`list`): Upper and lower limits to filter the data with before calculating metalog quantiles/pdfs.\n - should be set in conjunction with the `boundedness` parameter\n\n boundedness (:obj:`str`): String that is used to specify the type of metalog to fit.\n - must be in set ('u','sl','su','b')\n - Default: 'u'\n * Fits an unbounded metalog\n - 'sl' fits a strictly lower bounded metalog\n * len(bounds) must == 1\n - 'su' fits a strictly upper bounded metalog\n * len(bounds) must == 1\n - 'b' fits a upper/lower bounded metalog\n * len(bounds) must == 2\n * bounds[1] must be > bounds[0]\n\n term_limit (:obj:`int`): The upper limit of the range of metalog terms to use to fit the data.\n - strictly > term_lower_bound\n - in range [3,30]\n\n term_lower_bound (:obj:`int`): The lower limit of the range of metalog terms to use to fit the data.\n - strictly < term_limit\n - in range [2,29]\n\n fit_method (:obj:`str`): Fit method to use to fit metalog distribution.\n - must be in set ('any','OLS','LP','MLE')\n - Default: 'any'\n * first tries 'OLS' method than 'LP'\n - 'OLS' only tries to fit by solving directly for a coefficients using ordinary least squares method\n - 'LP' only tries to estimate fit using simplex linear program optimization routine\n - 'MLE' first tries 'OLS' method than falls back to a maximum likelihood estimation routine\n\n alpha (:obj:`float`, optional): Regularization term to add to OLS fit\n - strictly >= 0.\n - should be set in conjunction with `penalty` parameter\n - Default: 0. 
(no regularization, OLS)\n\n diff_error (:obj:`float`, optional): Value used to in scipy.optimize.linprog method call\n to init the array of values representing the\n upper-bound of each inequality constraint (row) in A_ub.\n - #TODO: Insert maths\n\n diff_step (:obj:`float`, optional): Value passed to `step_len` parameter in support.py diffMatMetalog method call\n defines the bin width for the Reimann sum of the differences differentiation method\n - diffMatMetalog differentiates the metalog pdf\n * Differentiation reference: https://math.stackexchange.com/a/313135\n Returns:\n m_dict: (:obj:`dict` with keys ['params', 'dataValues', 'Y', 'A', 'M', 'Validation'])\n - m_dict['A']: (:obj:`pandas.DataFrame` with columns ['a2','a3', ... ,'an'] of type numeric):\n * a2, a3, ... , an are our a coefficients returned by the method specified in `fit_method`\n\n - m_dict['M']: (:obj:`pandas.DataFrame` with columns 0:'pdf_1',1:'cdf_1',2:'pdf_2',3:'cdf_2',\n ...,((2*(term_limit-term_lower_bound))+1)-1:'pdf_n',\n ((2*(term_limit-term_lower_bound))+1):'cdf_n'\n where n is the total number of metalog fits determined by (term_limit-term_lower_bound)+1\n )\n * pdf_1, pdf_2, ... , pdf_n are the metalog pdfs returned by pdf_quantile_builder.pdfMetalog method\n * cdf_1, cdf_2, ... , cdf_n are the metalog quantiles returned by pdf_quantile_builder.quantileMetalog method\n \n - m_dict['y']: (:obj: `numpy.ndarray` of type float):\n * Array of bin widths for both the pdf_n and cdf_n\n\n - m_dict['Validation']: (:obj:`pandas.DataFrame` with columns ['term', 'valid', 'method'] of type str):\n * 'term': each metalog estimation given a number of terms\n * 'valid': boolean flag indicating if the metalog estimation was valid or not\n * 'method': a string indicating which method was used for the metalog estimation\n\n "
A = pd.DataFrame()
c_a_names = []
c_m_names = []
Mh = pd.DataFrame()
Validation = pd.DataFrame()
df_MH_temp_list = list()
df_A_temp_list = list()
df_Validation_temp_list = list()
for i in range(term_lower_bound, (term_limit + 1)):
Y = m_dict['Y'].iloc[:, 0:i]
eye = np.eye(Y.shape[1])
z = m_dict['dataValues']['z']
y = m_dict['dataValues']['probs']
step_len = m_dict['params']['step_len']
methodFit = 'OLS'
a_name = ('a' + str(i))
m_name = ('m' + str(i))
M_name = ('M' + str(i))
c_m_names = np.append(c_m_names, [m_name, M_name])
c_a_names = np.append(c_a_names, a_name)
if ((fit_method == 'any') or (fit_method == 'MLE')):
try:
temp = np.dot(np.dot(np.linalg.inv((np.dot(Y.T, Y) + (alpha * eye))), Y.T), z)
except Exception:
temp = a_vector_LP(m_dict, term_limit=i, term_lower_bound=i, diff_error=diff_error, diff_step=diff_step)
methodFit = 'Linear Program'
if (fit_method == 'OLS'):
try:
temp = np.dot(np.dot(np.linalg.inv((np.dot(Y.T, Y) + (alpha * eye))), Y.T), z)
except Exception:
raise RuntimeError('OLS was unable to solve infeasible or poorly formulated problem')
if (fit_method == 'LP'):
temp = a_vector_LP(m_dict, term_limit=i, term_lower_bound=i, diff_error=diff_error, diff_step=diff_step)
methodFit = 'Linear Program'
if (fit_method == 'MLE'):
temp = a_vector_MLE(temp, y, i, m_dict, bounds, boundedness)
temp = np.append(temp, np.zeros((term_limit - i)))
if (len(z) < 100):
y2 = np.linspace(step_len, (1 - step_len), int(((1 - step_len) / step_len)))
tailstep = (step_len / 10)
y1 = np.linspace(tailstep, (min(y2) - tailstep), int(((min(y2) - tailstep) / tailstep)))
y3 = np.linspace((max(y2) + tailstep), (max(y2) + (tailstep * 9)), int(((tailstep * 9) / tailstep)))
y = np.hstack((y1, y2, y3))
temp_dict = pdf_quantile_builder(temp, y=y, term_limit=i, bounds=bounds, boundedness=boundedness)
if ((temp_dict['valid'] == 'no') and (fit_method != 'OLS')):
temp = a_vector_LP(m_dict, term_limit=i, term_lower_bound=i, diff_error=diff_error, diff_step=diff_step)
temp = np.append(temp, np.zeros((term_limit - i)))
methodFit = 'Linear Program'
temp_dict = pdf_quantile_builder(temp, y=y, term_limit=i, bounds=bounds, boundedness=boundedness)
df_MH_temp_list.append(pd.DataFrame(temp_dict['m']))
df_MH_temp_list.append(pd.DataFrame(temp_dict['M']))
df_A_temp_list.append(pd.DataFrame(temp))
tempValidation = pd.DataFrame(data={'term': [i], 'valid': [temp_dict['valid']], 'method': [methodFit]})
df_Validation_temp_list.append(tempValidation)
Validation = pd.concat(df_Validation_temp_list, axis=0)
Mh = pd.concat(df_MH_temp_list, axis=1)
A = pd.concat(df_A_temp_list, axis=1)
A.columns = c_a_names
Mh.columns = c_m_names
m_dict['A'] = A
m_dict['M'] = Mh
m_dict['M']['y'] = temp_dict['y']
m_dict['Validation'] = Validation
A = np.column_stack((np.repeat(1.0, len(A)), A))
Est = np.dot(m_dict['Y'], A)
ncols = A.shape[1]
Z = np.column_stack((np.array(m_dict['dataValues']['z']), np.repeat(m_dict['dataValues']['z'].values, (ncols - 1)).reshape(len(m_dict['dataValues']['z']), (ncols - 1))))
m_dict['square_residual_error'] = ((Z - Est) ** 2).sum(axis=1)
return m_dict | 6,141,652,472,069,626,000 | Main workhorse function of pymetalog package.
Called during metalog.__init__ method call.
Args:
m_dict (:obj:`dict` with keys ['params', 'dataValues', 'Y']): Initialized output_dict variable from metalog class.
- m_dict['params']: (:obj:`dict` with keys ['bounds', 'boundedness', 'term_limit', 'term_lower_bound', 'step_len', 'fit_method']):
* 'bounds': metalog.bounds
* 'boundedness': metalog.boundedness
* 'term_limit': metalog.term_limit
* 'term_lower_bound': metalog.term_lower_bound
* 'step_len': metalog.step_len
* 'fit_method': metalog.fit_method
- m_dict['dataValues']: (:obj:`pandas.DataFrame` with columns ['x','probs','z'] of type numeric):
* 'x': metalog.x
* 'probs': metalog.probs
* 'z': column calculated in metalog.append_zvector method
- depends on metalog.boundedness attribute
- metalog.boundedness = 'u':
* 'z' = metalog.x
- metalog.boundedness = 'sl':
* 'z' = log( (metalog.x-lower_bound) )
- metalog.boundedness = 'su':
* 'z' = log( (upper_bound-metalog.x) )
- metalog.boundedness = 'b':
* 'z' = log( (metalog.x-lower_bound) / (upper_bound-metalog.x) )
- m_dict['Y']: (:obj:`pandas.DataFrame` with columns ['y1','y2','y3','y4', ... ,'yn'] of type numeric):
* 'y1': numpy.array of ones with length equal to len(x)
* 'y2': numpy.array of numeric values equal to the term attached to s in the logistic quantile function np.log(m_dict['dataValues']['probs'] / (1 - m_dict['dataValues']['probs']))
* 'y3': numpy.array of numeric values (m_dict['dataValues']['probs'] - 0.5) * m_dict['Y']['y2']
* 'y4': numpy.array of numeric values m_dict['Y']['y4'] = m_dict['dataValues']['probs'] - 0.5
* 'yn': numpy.array of numeric values:
- if n in 'yn' is odd,
m_dict['Y']['yn'] = m_dict['Y']['y4']**(int(i//2))
- if n in 'yn' is even,
zn = 'y' + str(n-1)
m_dict['Y'][yn] = m_dict['Y']['y2'] * m_dict['Y'][zn]
bounds (:obj:`list`): Upper and lower limits to filter the data with before calculating metalog quantiles/pdfs.
- should be set in conjunction with the `boundedness` parameter
boundedness (:obj:`str`): String that is used to specify the type of metalog to fit.
- must be in set ('u','sl','su','b')
- Default: 'u'
* Fits an unbounded metalog
- 'sl' fits a strictly lower bounded metalog
* len(bounds) must == 1
- 'su' fits a strictly upper bounded metalog
* len(bounds) must == 1
- 'b' fits an upper/lower bounded metalog
* len(bounds) must == 2
* bounds[1] must be > bounds[0]
term_limit (:obj:`int`): The upper limit of the range of metalog terms to use to fit the data.
- strictly > term_lower_bound
- in range [3,30]
term_lower_bound (:obj:`int`): The lower limit of the range of metalog terms to use to fit the data.
- strictly < term_limit
- in range [2,29]
fit_method (:obj:`str`): Fit method to use to fit metalog distribution.
- must be in set ('any','OLS','LP','MLE')
- Default: 'any'
* first tries the 'OLS' method, then 'LP'
- 'OLS' only tries to fit by solving directly for the a coefficients using the ordinary least squares method
- 'LP' only tries to estimate the fit using a simplex linear program optimization routine
- 'MLE' first tries the 'OLS' method, then falls back to a maximum likelihood estimation routine
alpha (:obj:`float`, optional): Regularization term to add to OLS fit
- strictly >= 0.
- should be set in conjunction with `penalty` parameter
- Default: 0. (no regularization, OLS)
diff_error (:obj:`float`, optional): Value used in the scipy.optimize.linprog method call
to init the array of values representing the
upper-bound of each inequality constraint (row) in A_ub.
- #TODO: Insert maths
diff_step (:obj:`float`, optional): Value passed to `step_len` parameter in support.py diffMatMetalog method call,
which defines the bin width for the Riemann sum of the differences differentiation method
- diffMatMetalog differentiates the metalog pdf
* Differentiation reference: https://math.stackexchange.com/a/313135
Returns:
m_dict: (:obj:`dict` with keys ['params', 'dataValues', 'Y', 'A', 'M', 'Validation'])
- m_dict['A']: (:obj:`pandas.DataFrame` with columns ['a2','a3', ... ,'an'] of type numeric):
* a2, a3, ... , an are our a coefficients returned by the method specified in `fit_method`
- m_dict['M']: (:obj:`pandas.DataFrame` with columns 0:'pdf_1',1:'cdf_1',2:'pdf_2',3:'cdf_2',
...,((2*(term_limit-term_lower_bound))+1)-1:'pdf_n',
((2*(term_limit-term_lower_bound))+1):'cdf_n'
where n is the total number of metalog fits determined by (term_limit-term_lower_bound)+1
)
* pdf_1, pdf_2, ... , pdf_n are the metalog pdfs returned by pdf_quantile_builder.pdfMetalog method
* cdf_1, cdf_2, ... , cdf_n are the metalog quantiles returned by pdf_quantile_builder.quantileMetalog method
- m_dict['y']: (:obj: `numpy.ndarray` of type float):
* Array of bin widths for both the pdf_n and cdf_n
- m_dict['Validation']: (:obj:`pandas.DataFrame` with columns ['term', 'valid', 'method'] of type str):
* 'term': each metalog estimation given a number of terms
* 'valid': boolean flag indicating if the metalog estimation was valid or not
* 'method': a string indicating which method was used for the metalog estimation | pymetalog/a_vector.py | a_vector_OLS_and_LP | sives5/pymetalog | python | def a_vector_OLS_and_LP(m_dict, bounds, boundedness, term_limit, term_lower_bound, fit_method, alpha, diff_error=0.001, diff_step=0.001):
" Main workhorse function of pymetalog package.\n Called during metalog.__init__ method call.\n\n Args:\n m_dict (:obj:`dict` with keys ['params', 'dataValues', 'Y']): Initialized output_dict variable from metalog class.\n - m_dict['params']: (:obj:`dict` with keys ['bounds', 'boundedness', 'term_limit', 'term_lower_bound', 'step_len', 'fit_method']):\n * 'bounds': metalog.bounds\n * 'boundedness': metalog.boundedness\n * 'term_limit': metalog.term_limit\n * 'term_lower_bound': metalog.term_lower_bound\n * 'step_len': metalog.step_len\n * 'fit_method': metalog.fit_method\n\n - m_dict['dataValues']: (:obj:`pandas.DataFrame` with columns ['x','probs','z'] of type numeric):\n * 'x': metalog.x\n * 'probs': metalog.probs\n * 'z': column calculated in metalog.append_zvector method\n - depends on metalog.boundedness attribute\n - metalog.boundedness = 'u':\n * 'z' = metalog.x\n - metalog.boundedness = 'sl':\n * 'z' = log( (metalog.x-lower_bound) )\n - metalog.boundedness = 'su':\n * 'z' = = log( (upper_bound-metalog.x) )\n - metalog.boundedness = 'b':\n * 'z' = log( (metalog.x-lower_bound) / (upper_bound-metalog.x) )\n\n - m_dict['Y']: (:obj:`pandas.DataFrame` with columns ['y1','y2','y3','y4', ... ,'yn'] of type numeric):\n * 'y1': numpy.array of ones with length equal to len(x)\n * 'y2': numpy.array of numeric values equal to the term attached to s in the logistic quantile function np.log(m_dict['dataValues']['probs'] / (1 - m_dict['dataValues']['probs']))\n * 'y3': numpy.array of numeric values (m_dict['dataValues']['probs'] - 0.5) * m_dict['Y']['y2']\n * 'y4': numpy.array of numeric values m_dict['Y']['y4'] = m_dict['dataValues']['probs'] - 0.5\n * 'yn': numpy.array of numeric values:\n - if n in 'yn' is odd,\n m_dict['Y']['yn'] = m_dict['Y']['y4']**(int(i//2))\n - if n in 'yn' is even,\n zn = 'y' + str(n-1)\n m_dict['Y'][yn] = m_dict['Y']['y2'] * m_dict['Y'][zn]\n\n bounds (:obj:`list`): Upper and lower limits to filter the data with before calculating metalog quantiles/pdfs.\n - should be set in conjunction with the `boundedness` parameter\n\n boundedness (:obj:`str`): String that is used to specify the type of metalog to fit.\n - must be in set ('u','sl','su','b')\n - Default: 'u'\n * Fits an unbounded metalog\n - 'sl' fits a strictly lower bounded metalog\n * len(bounds) must == 1\n - 'su' fits a strictly upper bounded metalog\n * len(bounds) must == 1\n - 'b' fits a upper/lower bounded metalog\n * len(bounds) must == 2\n * bounds[1] must be > bounds[0]\n\n term_limit (:obj:`int`): The upper limit of the range of metalog terms to use to fit the data.\n - strictly > term_lower_bound\n - in range [3,30]\n\n term_lower_bound (:obj:`int`): The lower limit of the range of metalog terms to use to fit the data.\n - strictly < term_limit\n - in range [2,29]\n\n fit_method (:obj:`str`): Fit method to use to fit metalog distribution.\n - must be in set ('any','OLS','LP','MLE')\n - Default: 'any'\n * first tries 'OLS' method than 'LP'\n - 'OLS' only tries to fit by solving directly for a coefficients using ordinary least squares method\n - 'LP' only tries to estimate fit using simplex linear program optimization routine\n - 'MLE' first tries 'OLS' method than falls back to a maximum likelihood estimation routine\n\n alpha (:obj:`float`, optional): Regularization term to add to OLS fit\n - strictly >= 0.\n - should be set in conjunction with `penalty` parameter\n - Default: 0. 
(no regularization, OLS)\n\n diff_error (:obj:`float`, optional): Value used to in scipy.optimize.linprog method call\n to init the array of values representing the\n upper-bound of each inequality constraint (row) in A_ub.\n - #TODO: Insert maths\n\n diff_step (:obj:`float`, optional): Value passed to `step_len` parameter in support.py diffMatMetalog method call\n defines the bin width for the Reimann sum of the differences differentiation method\n - diffMatMetalog differentiates the metalog pdf\n * Differentiation reference: https://math.stackexchange.com/a/313135\n Returns:\n m_dict: (:obj:`dict` with keys ['params', 'dataValues', 'Y', 'A', 'M', 'Validation'])\n - m_dict['A']: (:obj:`pandas.DataFrame` with columns ['a2','a3', ... ,'an'] of type numeric):\n * a2, a3, ... , an are our a coefficients returned by the method specified in `fit_method`\n\n - m_dict['M']: (:obj:`pandas.DataFrame` with columns 0:'pdf_1',1:'cdf_1',2:'pdf_2',3:'cdf_2',\n ...,((2*(term_limit-term_lower_bound))+1)-1:'pdf_n',\n ((2*(term_limit-term_lower_bound))+1):'cdf_n'\n where n is the total number of metalog fits determined by (term_limit-term_lower_bound)+1\n )\n * pdf_1, pdf_2, ... , pdf_n are the metalog pdfs returned by pdf_quantile_builder.pdfMetalog method\n * cdf_1, cdf_2, ... , cdf_n are the metalog quantiles returned by pdf_quantile_builder.quantileMetalog method\n \n - m_dict['y']: (:obj: `numpy.ndarray` of type float):\n * Array of bin widths for both the pdf_n and cdf_n\n\n - m_dict['Validation']: (:obj:`pandas.DataFrame` with columns ['term', 'valid', 'method'] of type str):\n * 'term': each metalog estimation given a number of terms\n * 'valid': boolean flag indicating if the metalog estimation was valid or not\n * 'method': a string indicating which method was used for the metalog estimation\n\n "
A = pd.DataFrame()
c_a_names = []
c_m_names = []
Mh = pd.DataFrame()
Validation = pd.DataFrame()
df_MH_temp_list = list()
df_A_temp_list = list()
df_Validation_temp_list = list()
for i in range(term_lower_bound, (term_limit + 1)):
Y = m_dict['Y'].iloc[:, 0:i]
eye = np.eye(Y.shape[1])
z = m_dict['dataValues']['z']
y = m_dict['dataValues']['probs']
step_len = m_dict['params']['step_len']
methodFit = 'OLS'
a_name = ('a' + str(i))
m_name = ('m' + str(i))
M_name = ('M' + str(i))
c_m_names = np.append(c_m_names, [m_name, M_name])
c_a_names = np.append(c_a_names, a_name)
if ((fit_method == 'any') or (fit_method == 'MLE')):
try:
temp = np.dot(np.dot(np.linalg.inv((np.dot(Y.T, Y) + (alpha * eye))), Y.T), z)
except Exception:
temp = a_vector_LP(m_dict, term_limit=i, term_lower_bound=i, diff_error=diff_error, diff_step=diff_step)
methodFit = 'Linear Program'
if (fit_method == 'OLS'):
try:
temp = np.dot(np.dot(np.linalg.inv((np.dot(Y.T, Y) + (alpha * eye))), Y.T), z)
except Exception:
raise RuntimeError('OLS was unable to solve infeasible or poorly formulated problem')
if (fit_method == 'LP'):
temp = a_vector_LP(m_dict, term_limit=i, term_lower_bound=i, diff_error=diff_error, diff_step=diff_step)
methodFit = 'Linear Program'
if (fit_method == 'MLE'):
temp = a_vector_MLE(temp, y, i, m_dict, bounds, boundedness)
temp = np.append(temp, np.zeros((term_limit - i)))
if (len(z) < 100):
y2 = np.linspace(step_len, (1 - step_len), int(((1 - step_len) / step_len)))
tailstep = (step_len / 10)
y1 = np.linspace(tailstep, (min(y2) - tailstep), int(((min(y2) - tailstep) / tailstep)))
y3 = np.linspace((max(y2) + tailstep), (max(y2) + (tailstep * 9)), int(((tailstep * 9) / tailstep)))
y = np.hstack((y1, y2, y3))
temp_dict = pdf_quantile_builder(temp, y=y, term_limit=i, bounds=bounds, boundedness=boundedness)
if ((temp_dict['valid'] == 'no') and (fit_method != 'OLS')):
temp = a_vector_LP(m_dict, term_limit=i, term_lower_bound=i, diff_error=diff_error, diff_step=diff_step)
temp = np.append(temp, np.zeros((term_limit - i)))
methodFit = 'Linear Program'
temp_dict = pdf_quantile_builder(temp, y=y, term_limit=i, bounds=bounds, boundedness=boundedness)
df_MH_temp_list.append(pd.DataFrame(temp_dict['m']))
df_MH_temp_list.append(pd.DataFrame(temp_dict['M']))
df_A_temp_list.append(pd.DataFrame(temp))
tempValidation = pd.DataFrame(data={'term': [i], 'valid': [temp_dict['valid']], 'method': [methodFit]})
df_Validation_temp_list.append(tempValidation)
Validation = pd.concat(df_Validation_temp_list, axis=0)
Mh = pd.concat(df_MH_temp_list, axis=1)
A = pd.concat(df_A_temp_list, axis=1)
A.columns = c_a_names
Mh.columns = c_m_names
m_dict['A'] = A
m_dict['M'] = Mh
m_dict['M']['y'] = temp_dict['y']
m_dict['Validation'] = Validation
A = np.column_stack((np.repeat(1.0, len(A)), A))
Est = np.dot(m_dict['Y'], A)
ncols = A.shape[1]
Z = np.column_stack((np.array(m_dict['dataValues']['z']), np.repeat(m_dict['dataValues']['z'].values, (ncols - 1)).reshape(len(m_dict['dataValues']['z']), (ncols - 1))))
m_dict['square_residual_error'] = ((Z - Est) ** 2).sum(axis=1)
return m_dict |
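The OLS branch above solves the regularized normal equations temp = (Y.T @ Y + alpha * I)^-1 @ Y.T @ z. A self-contained sketch of just that step (illustrative names, not pymetalog API), using np.linalg.solve instead of an explicit inverse for numerical stability:

import numpy as np

def ridge_coefficients(Y: np.ndarray, z: np.ndarray, alpha: float = 0.0) -> np.ndarray:
    # Solve (Y^T Y + alpha*I) a = Y^T z; np.linalg.solve raises LinAlgError on a
    # singular system, which is the failure the fitter above catches before
    # falling back to the linear-program fit.
    n_terms = Y.shape[1]
    lhs = Y.T @ Y + alpha * np.eye(n_terms)
    return np.linalg.solve(lhs, Y.T @ z)

rng = np.random.default_rng(0)
Y = rng.normal(size=(200, 4))  # stand-in design matrix of metalog basis terms
z = rng.normal(size=200)       # stand-in transformed quantile data
a = ridge_coefficients(Y, z, alpha=0.1)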
def a_vector_LP(m_dict, term_limit, term_lower_bound, diff_error=0.001, diff_step=0.001):
'TODO: write docstring\n\n '
cnames = np.array([])
for i in range(term_lower_bound, (term_limit + 1)):
Y = m_dict['Y'].iloc[:, 0:i]
z = m_dict['dataValues']['z']
Y_neg = (- Y)
new_Y = pd.DataFrame({'y1': Y.iloc[:, 0], 'y1_neg': Y_neg.iloc[:, 0]})
for c in range(1, len(Y.iloc[0, :])):
new_Y[('y' + str((c + 1)))] = Y.iloc[:, c]
new_Y[(('y' + str((c + 1))) + '_neg')] = Y_neg.iloc[:, c]
a = np.array([''.join(['a', str(i)])])
cnames = np.append(cnames, a, axis=0)
error_mat = np.array([])
for j in range(1, (len(Y.iloc[:, 0]) + 1)):
front_zeros = np.zeros((2 * (j - 1)))
ones = [1, (- 1)]
trail_zeroes = np.zeros((2 * (len(Y.iloc[:, 1]) - j)))
if (j == 1):
error_vars = np.append(ones, trail_zeroes)
elif (j != 1):
error_vars = np.append(front_zeros, ones)
error_vars = np.append(error_vars, trail_zeroes)
if (error_mat.size == 0):
error_mat = np.append(error_mat, error_vars, axis=0)
else:
error_mat = np.vstack((error_mat, error_vars))
new = pd.concat((pd.DataFrame(data=error_mat), new_Y), axis=1)
diff_mat = diffMatMetalog(i, diff_step)
diff_zeros = []
for t in range(0, len(diff_mat.iloc[:, 0])):
zeros_temp = np.zeros((2 * len(Y.iloc[:, 0])))
if (np.size(diff_zeros) == 0):
diff_zeros = zeros_temp
else:
diff_zeros = np.vstack((zeros_temp, diff_zeros))
diff_mat = np.concatenate((diff_zeros, diff_mat), axis=1)
lp_mat = np.concatenate((new, diff_mat), axis=0)
c = np.append(np.ones((2 * len(Y.iloc[:, 1]))), np.zeros((2 * i)))
A_eq = lp_mat[:len(Y.iloc[:, 1]), :]
A_ub = ((- 1) * lp_mat[len(Y.iloc[:, 1]):, :])
b_eq = z
b_ub = ((- 1) * np.repeat(diff_error, len(diff_mat[:, 0])))
lp_sol = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, method='simplex', options={'maxiter': 5000, 'tol': 1e-05, 'disp': False})
tempLP = lp_sol.x[(2 * len(Y.iloc[:, 1])):(len(lp_sol.x) + 1)]
temp = []
for r in range(0, (len(tempLP) // 2)):
temp.append((tempLP[(r * 2)] - tempLP[((2 * r) + 1)]))
return temp | -768,892,783,951,292,000 | TODO: write docstring | pymetalog/a_vector.py | a_vector_LP | sives5/pymetalog | python | def a_vector_LP(m_dict, term_limit, term_lower_bound, diff_error=0.001, diff_step=0.001):
'\n\n '
cnames = np.array([])
for i in range(term_lower_bound, (term_limit + 1)):
Y = m_dict['Y'].iloc[:, 0:i]
z = m_dict['dataValues']['z']
Y_neg = (- Y)
new_Y = pd.DataFrame({'y1': Y.iloc[:, 0], 'y1_neg': Y_neg.iloc[:, 0]})
for c in range(1, len(Y.iloc[0, :])):
new_Y[('y' + str((c + 1)))] = Y.iloc[:, c]
new_Y[(('y' + str((c + 1))) + '_neg')] = Y_neg.iloc[:, c]
a = np.array([''.join(['a', str(i)])])
cnames = np.append(cnames, a, axis=0)
error_mat = np.array([])
for j in range(1, (len(Y.iloc[:, 0]) + 1)):
front_zeros = np.zeros((2 * (j - 1)))
ones = [1, (- 1)]
trail_zeroes = np.zeros((2 * (len(Y.iloc[:, 1]) - j)))
if (j == 1):
error_vars = np.append(ones, trail_zeroes)
elif (j != 1):
error_vars = np.append(front_zeros, ones)
error_vars = np.append(error_vars, trail_zeroes)
if (error_mat.size == 0):
error_mat = np.append(error_mat, error_vars, axis=0)
else:
error_mat = np.vstack((error_mat, error_vars))
new = pd.concat((pd.DataFrame(data=error_mat), new_Y), axis=1)
diff_mat = diffMatMetalog(i, diff_step)
diff_zeros = []
for t in range(0, len(diff_mat.iloc[:, 0])):
zeros_temp = np.zeros((2 * len(Y.iloc[:, 0])))
if (np.size(diff_zeros) == 0):
diff_zeros = zeros_temp
else:
diff_zeros = np.vstack((zeros_temp, diff_zeros))
diff_mat = np.concatenate((diff_zeros, diff_mat), axis=1)
lp_mat = np.concatenate((new, diff_mat), axis=0)
c = np.append(np.ones((2 * len(Y.iloc[:, 1]))), np.zeros((2 * i)))
A_eq = lp_mat[:len(Y.iloc[:, 1]), :]
A_ub = ((- 1) * lp_mat[len(Y.iloc[:, 1]):, :])
b_eq = z
b_ub = ((- 1) * np.repeat(diff_error, len(diff_mat[:, 0])))
lp_sol = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, method='simplex', options={'maxiter': 5000, 'tol': 1e-05, 'disp': False})
tempLP = lp_sol.x[(2 * len(Y.iloc[:, 1])):(len(lp_sol.x) + 1)]
temp = []
for r in range(0, (len(tempLP) // 2)):
temp.append((tempLP[(r * 2)] - tempLP[((2 * r) + 1)]))
return temp |
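a_vector_LP encodes a least-absolute-deviation fit: each residual and each coefficient is split into nonnegative positive and negative parts so that the absolute-value objective becomes linear (the y1/y1_neg columns and the paired error variables above). The real routine also imposes monotonic-quantile inequality constraints via diffMatMetalog; a compact standalone sketch of just the split-variable trick (illustrative, not pymetalog API) is:

import numpy as np
from scipy.optimize import linprog

def l1_fit(Y: np.ndarray, z: np.ndarray) -> np.ndarray:
    # Variables: [e_plus (n), e_minus (n), a_plus (k), a_minus (k)], all >= 0
    # by linprog's default bounds. Minimize sum(e_plus + e_minus) subject to
    # Y @ (a_plus - a_minus) + e_plus - e_minus = z.
    n, k = Y.shape
    c = np.concatenate([np.ones(2 * n), np.zeros(2 * k)])
    A_eq = np.hstack([np.eye(n), -np.eye(n), Y, -Y])
    res = linprog(c, A_eq=A_eq, b_eq=z, method='highs')
    x = res.x
    return x[2 * n:2 * n + k] - x[2 * n + k:]  # a = a_plus - a_minus

Y = np.column_stack([np.ones(50), np.linspace(0.0, 1.0, 50)])
z = 2.0 + 3.0 * Y[:, 1]
coeffs = l1_fit(Y, z)  # recovers approximately [2.0, 3.0]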
def a_vector_MLE(a, y, term, m_dict, bounds, boundedness):
'TODO: write docstring\n\n '
ym = [newtons_method_metalog(a, xi, term, bounds, boundedness) for xi in m_dict['dataValues']['x']]
def MLE_quantile_constraints(x):
M = [quantileMetalog(x[:term], yi, term, bounds=bounds, boundedness=boundedness) for yi in x[term:]]
return (m_dict['dataValues']['x'] - M)
def MLE_objective_function(x, y, term, m_dict):
return (- np.sum([np.log10(pdfMetalog(x[:term], yi, term, bounds, boundedness)) for yi in np.absolute(x[term:])]))
m_dict[str(('MLE' + str(term)))] = {}
x0 = np.hstack((a[:term], ym))
m_dict[str(('MLE' + str(term)))]['oldobj'] = (- MLE_objective_function(x0, y, term, m_dict))
bnd = ((((None, None),) * len(a)) + (((0, 1),) * (len(x0) - len(a))))
con = NonlinearConstraint(MLE_quantile_constraints, 0, 0)
mle = minimize(MLE_objective_function, x0, args=(y, term, m_dict), bounds=bnd, constraints=con)
m_dict[str(('MLE' + str(term)))]['newobj'] = (- MLE_objective_function(mle.x, y, term, m_dict))
m_dict[str(('MLE' + str(term)))]['A'] = mle.x[:term]
m_dict[str(('MLE' + str(term)))]['Y'] = mle.x[term:]
m_dict[str(('MLE' + str(term)))]['oldA'] = a
m_dict[str(('MLE' + str(term)))]['oldY'] = y
out_temp = np.zeros_like(a)
for i in range(term):
out_temp[i] = mle.x[i]
return out_temp | 7,889,138,763,662,265,000 | TODO: write docstring | pymetalog/a_vector.py | a_vector_MLE | sives5/pymetalog | python | def a_vector_MLE(a, y, term, m_dict, bounds, boundedness):
'\n\n '
ym = [newtons_method_metalog(a, xi, term, bounds, boundedness) for xi in m_dict['dataValues']['x']]
def MLE_quantile_constraints(x):
M = [quantileMetalog(x[:term], yi, term, bounds=bounds, boundedness=boundedness) for yi in x[term:]]
return (m_dict['dataValues']['x'] - M)
def MLE_objective_function(x, y, term, m_dict):
return (- np.sum([np.log10(pdfMetalog(x[:term], yi, term, bounds, boundedness)) for yi in np.absolute(x[term:])]))
m_dict[str(('MLE' + str(term)))] = {}
x0 = np.hstack((a[:term], ym))
m_dict[str(('MLE' + str(term)))]['oldobj'] = (- MLE_objective_function(x0, y, term, m_dict))
bnd = ((((None, None),) * len(a)) + (((0, 1),) * (len(x0) - len(a))))
con = NonlinearConstraint(MLE_quantile_constraints, 0, 0)
mle = minimize(MLE_objective_function, x0, args=(y, term, m_dict), bounds=bnd, constraints=con)
m_dict[str(('MLE' + str(term)))]['newobj'] = (- MLE_objective_function(mle.x, y, term, m_dict))
m_dict[str(('MLE' + str(term)))]['A'] = mle.x[:term]
m_dict[str(('MLE' + str(term)))]['Y'] = mle.x[term:]
m_dict[str(('MLE' + str(term)))]['oldA'] = a
m_dict[str(('MLE' + str(term)))]['oldY'] = y
out_temp = np.zeros_like(a)
for i in range(term):
out_temp[i] = mle.x[i]
return out_temp |
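a_vector_MLE seeds scipy.optimize.minimize with the OLS coefficients plus the Newton-inverted probabilities, then forces the fitted quantile function to reproduce the data through an equality NonlinearConstraint. A generic sketch of that optimize-under-equality-constraint pattern (toy objective and constraint, not the metalog likelihood):

import numpy as np
from scipy.optimize import minimize, NonlinearConstraint

def objective(x: np.ndarray) -> float:
    # Stand-in for the negative log-likelihood minimized above.
    return float(np.sum((x - 1.0) ** 2))

# Equality constraint g(x) = 0, in the same style as MLE_quantile_constraints.
con = NonlinearConstraint(lambda x: np.sum(x) - 3.0, 0.0, 0.0)
res = minimize(objective, x0=np.zeros(3), constraints=con, method='trust-constr')
# res.x -> approximately [1.0, 1.0, 1.0], which also satisfies the constraint.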
def test_alap_pass(self):
'Test ALAP scheduling.'
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.u2(3.14, 1.57, q[0])
qc.u2(0.5, 0.25, q[1])
qc.barrier(q[1])
qc.u2(0.5, 0.25, q[1])
qc.barrier(q[0], q[1])
qc.cx(q[0], q[1])
qc.measure(q, c)
sched = schedule(qc, self.backend)
expected = Schedule((28, self.cmd_def.get('u2', [0], 3.14, 1.57)), self.cmd_def.get('u2', [1], 0.5, 0.25), (28, self.cmd_def.get('u2', [1], 0.5, 0.25)), (56, self.cmd_def.get('cx', [0, 1])), (78, self.cmd_def.get('measure', [0, 1])))
for (actual, expected) in zip(sched.instructions, expected.instructions):
self.assertEqual(actual[0], expected[0])
self.assertEqual(actual[1].command, expected[1].command)
self.assertEqual(actual[1].channels, expected[1].channels) | -1,342,313,367,346,715,600 | Test ALAP scheduling. | artifacts/old_dataset_versions/minimal_commits/qiskit-terra/qiskit-terra#2704/after/test_basic_scheduler.py | test_alap_pass | MattePalte/Bugs-Quantum-Computing-Platforms | python | def test_alap_pass(self):
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.u2(3.14, 1.57, q[0])
qc.u2(0.5, 0.25, q[1])
qc.barrier(q[1])
qc.u2(0.5, 0.25, q[1])
qc.barrier(q[0], q[1])
qc.cx(q[0], q[1])
qc.measure(q, c)
sched = schedule(qc, self.backend)
expected = Schedule((28, self.cmd_def.get('u2', [0], 3.14, 1.57)), self.cmd_def.get('u2', [1], 0.5, 0.25), (28, self.cmd_def.get('u2', [1], 0.5, 0.25)), (56, self.cmd_def.get('cx', [0, 1])), (78, self.cmd_def.get('measure', [0, 1])))
for (actual, expected) in zip(sched.instructions, expected.instructions):
self.assertEqual(actual[0], expected[0])
self.assertEqual(actual[1].command, expected[1].command)
self.assertEqual(actual[1].channels, expected[1].channels) |
def test_alap_with_barriers(self):
'Test that ALAP respects barriers on new qubits.'
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.u2(0, 0, q[0])
qc.barrier(q[0], q[1])
qc.u2(0, 0, q[1])
sched = schedule(qc, self.backend, method='alap')
expected = Schedule(self.cmd_def.get('u2', [0], 0, 0), (28, self.cmd_def.get('u2', [1], 0, 0)))
for (actual, expected) in zip(sched.instructions, expected.instructions):
self.assertEqual(actual[0], expected[0])
self.assertEqual(actual[1].command, expected[1].command)
self.assertEqual(actual[1].channels, expected[1].channels) | -8,490,361,027,831,596,000 | Test that ALAP respects barriers on new qubits. | artifacts/old_dataset_versions/minimal_commits/qiskit-terra/qiskit-terra#2704/after/test_basic_scheduler.py | test_alap_with_barriers | MattePalte/Bugs-Quantum-Computing-Platforms | python | def test_alap_with_barriers(self):
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.u2(0, 0, q[0])
qc.barrier(q[0], q[1])
qc.u2(0, 0, q[1])
sched = schedule(qc, self.backend, method='alap')
expected = Schedule(self.cmd_def.get('u2', [0], 0, 0), (28, self.cmd_def.get('u2', [1], 0, 0)))
for (actual, expected) in zip(sched.instructions, expected.instructions):
self.assertEqual(actual[0], expected[0])
self.assertEqual(actual[1].command, expected[1].command)
self.assertEqual(actual[1].channels, expected[1].channels) |
def test_alap_aligns_end(self):
'Test that ALAP always acts as though there is a final global barrier.'
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.u3(0, 0, 0, q[0])
qc.u2(0, 0, q[1])
sched = schedule(qc, self.backend, method='alap')
expected_sched = Schedule(self.cmd_def.get('u2', [1], 0, 0), (26, self.cmd_def.get('u3', [0], 0, 0, 0)))
for (actual, expected) in zip(sched.instructions, expected_sched.instructions):
self.assertEqual(actual[0], expected[0])
self.assertEqual(actual[1].command, expected[1].command)
self.assertEqual(actual[1].channels, expected[1].channels)
self.assertEqual(sched.ch_duration(DriveChannel(0)), expected_sched.ch_duration(DriveChannel(1))) | 7,536,511,695,377,579,000 | Test that ALAP always acts as though there is a final global barrier. | artifacts/old_dataset_versions/minimal_commits/qiskit-terra/qiskit-terra#2704/after/test_basic_scheduler.py | test_alap_aligns_end | MattePalte/Bugs-Quantum-Computing-Platforms | python | def test_alap_aligns_end(self):
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.u3(0, 0, 0, q[0])
qc.u2(0, 0, q[1])
sched = schedule(qc, self.backend, method='alap')
expected_sched = Schedule(self.cmd_def.get('u2', [1], 0, 0), (26, self.cmd_def.get('u3', [0], 0, 0, 0)))
for (actual, expected) in zip(sched.instructions, expected_sched.instructions):
self.assertEqual(actual[0], expected[0])
self.assertEqual(actual[1].command, expected[1].command)
self.assertEqual(actual[1].channels, expected[1].channels)
self.assertEqual(sched.ch_duration(DriveChannel(0)), expected_sched.ch_duration(DriveChannel(1))) |
def test_asap_pass(self):
'Test ASAP scheduling.'
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.u2(3.14, 1.57, q[0])
qc.u2(0.5, 0.25, q[1])
qc.barrier(q[1])
qc.u2(0.5, 0.25, q[1])
qc.barrier(q[0], q[1])
qc.cx(q[0], q[1])
qc.measure(q, c)
sched = schedule(qc, self.backend, method='as_soon_as_possible')
expected = Schedule(self.cmd_def.get('u2', [0], 3.14, 1.57), self.cmd_def.get('u2', [1], 0.5, 0.25), (28, self.cmd_def.get('u2', [1], 0.5, 0.25)), (56, self.cmd_def.get('cx', [0, 1])), (78, self.cmd_def.get('measure', [0, 1])))
self.assertEqual(sched.instructions, expected.instructions) | -1,838,080,591,864,963,300 | Test ASAP scheduling. | artifacts/old_dataset_versions/minimal_commits/qiskit-terra/qiskit-terra#2704/after/test_basic_scheduler.py | test_asap_pass | MattePalte/Bugs-Quantum-Computing-Platforms | python | def test_asap_pass(self):
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.u2(3.14, 1.57, q[0])
qc.u2(0.5, 0.25, q[1])
qc.barrier(q[1])
qc.u2(0.5, 0.25, q[1])
qc.barrier(q[0], q[1])
qc.cx(q[0], q[1])
qc.measure(q, c)
sched = schedule(qc, self.backend, method='as_soon_as_possible')
expected = Schedule(self.cmd_def.get('u2', [0], 3.14, 1.57), self.cmd_def.get('u2', [1], 0.5, 0.25), (28, self.cmd_def.get('u2', [1], 0.5, 0.25)), (56, self.cmd_def.get('cx', [0, 1])), (78, self.cmd_def.get('measure', [0, 1])))
self.assertEqual(sched.instructions, expected.instructions) |
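Every test in this file follows the same pattern: build a circuit, schedule it against a pulse-enabled mock backend, and compare instruction start times. A hedged sketch of that call, mirroring the fixtures above (module paths moved across qiskit-terra versions, so treat the imports as assumptions):

from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit.compiler import schedule
from qiskit.test.mock import FakeOpenPulse2Q

backend = FakeOpenPulse2Q()
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.cx(q[0], q[1])
qc.measure(q, c)

# 'alap'/'asap' are accepted aliases for the long method names used above.
alap_sched = schedule(qc, backend, method='as_late_as_possible')
asap_sched = schedule(qc, backend, method='as_soon_as_possible')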
def test_alap_resource_respecting(self):
"Test that the ALAP pass properly respects busy resources when backwards scheduling.\n For instance, a CX on 0 and 1 followed by an X on only 1 must respect both qubits'\n timeline."
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.cx(q[0], q[1])
qc.u2(0.5, 0.25, q[1])
sched = schedule(qc, self.backend, method='as_late_as_possible')
insts = sched.instructions
self.assertEqual(insts[0][0], 0)
self.assertEqual(insts[4][0], 22)
qc = QuantumCircuit(q, c)
qc.cx(q[0], q[1])
qc.u2(0.5, 0.25, q[1])
qc.measure(q, c)
sched = schedule(qc, self.backend, method='as_late_as_possible')
self.assertEqual(sched.instructions[(- 1)][0], 50) | -4,846,775,682,320,039,000 | Test that the ALAP pass properly respects busy resources when backwards scheduling.
For instance, a CX on 0 and 1 followed by an X on only 1 must respect both qubits'
timeline. | artifacts/old_dataset_versions/minimal_commits/qiskit-terra/qiskit-terra#2704/after/test_basic_scheduler.py | test_alap_resource_respecting | MattePalte/Bugs-Quantum-Computing-Platforms | python | def test_alap_resource_respecting(self):
"Test that the ALAP pass properly respects busy resources when backwards scheduling.\n For instance, a CX on 0 and 1 followed by an X on only 1 must respect both qubits'\n timeline."
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.cx(q[0], q[1])
qc.u2(0.5, 0.25, q[1])
sched = schedule(qc, self.backend, method='as_late_as_possible')
insts = sched.instructions
self.assertEqual(insts[0][0], 0)
self.assertEqual(insts[4][0], 22)
qc = QuantumCircuit(q, c)
qc.cx(q[0], q[1])
qc.u2(0.5, 0.25, q[1])
qc.measure(q, c)
sched = schedule(qc, self.backend, method='as_late_as_possible')
self.assertEqual(sched.instructions[(- 1)][0], 50) |
def test_cmd_def_schedules_unaltered(self):
"Test that forward scheduling doesn't change relative timing with a command."
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.cx(q[0], q[1])
sched1 = schedule(qc, self.backend, method='as_soon_as_possible')
sched2 = schedule(qc, self.backend, method='as_late_as_possible')
self.assertEqual(sched1.instructions, sched2.instructions)
insts = sched1.instructions
self.assertEqual(insts[0][0], 0)
self.assertEqual(insts[1][0], 10)
self.assertEqual(insts[2][0], 20)
self.assertEqual(insts[3][0], 20) | 6,633,446,773,433,102,000 | Test that forward scheduling doesn't change relative timing with a command. | artifacts/old_dataset_versions/minimal_commits/qiskit-terra/qiskit-terra#2704/after/test_basic_scheduler.py | test_cmd_def_schedules_unaltered | MattePalte/Bugs-Quantum-Computing-Platforms | python | def test_cmd_def_schedules_unaltered(self):
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.cx(q[0], q[1])
sched1 = schedule(qc, self.backend, method='as_soon_as_possible')
sched2 = schedule(qc, self.backend, method='as_late_as_possible')
self.assertEqual(sched1.instructions, sched2.instructions)
insts = sched1.instructions
self.assertEqual(insts[0][0], 0)
self.assertEqual(insts[1][0], 10)
self.assertEqual(insts[2][0], 20)
self.assertEqual(insts[3][0], 20) |
def test_measure_combined(self):
'\n Test to check for measure on the same qubit which generated another measure schedule.\n\n The measures on different qubits are combined, but measures on the same qubit\n adds another measure to the schedule.\n '
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.u2(3.14, 1.57, q[0])
qc.cx(q[0], q[1])
qc.measure(q[0], c[0])
qc.measure(q[1], c[1])
qc.measure(q[1], c[1])
sched = schedule(qc, self.backend, method='as_soon_as_possible')
expected = Schedule(self.cmd_def.get('u2', [0], 3.14, 1.57), (28, self.cmd_def.get('cx', [0, 1])), (50, self.cmd_def.get('measure', [0, 1])), (60, self.cmd_def.get('measure', [0, 1]).filter(channels=[MeasureChannel(1)])), (60, Acquire(duration=10)([AcquireChannel(0), AcquireChannel(1)], [MemorySlot(0), MemorySlot(1)])))
self.assertEqual(sched.instructions, expected.instructions) | -8,182,329,726,274,524,000 | Test to check for measure on the same qubit which generated another measure schedule.
The measures on different qubits are combined, but measures on the same qubit
adds another measure to the schedule. | artifacts/old_dataset_versions/minimal_commits/qiskit-terra/qiskit-terra#2704/after/test_basic_scheduler.py | test_measure_combined | MattePalte/Bugs-Quantum-Computing-Platforms | python | def test_measure_combined(self):
'\n Test to check for measure on the same qubit which generated another measure schedule.\n\n The measures on different qubits are combined, but measures on the same qubit\n adds another measure to the schedule.\n '
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.u2(3.14, 1.57, q[0])
qc.cx(q[0], q[1])
qc.measure(q[0], c[0])
qc.measure(q[1], c[1])
qc.measure(q[1], c[1])
sched = schedule(qc, self.backend, method='as_soon_as_possible')
expected = Schedule(self.cmd_def.get('u2', [0], 3.14, 1.57), (28, self.cmd_def.get('cx', [0, 1])), (50, self.cmd_def.get('measure', [0, 1])), (60, self.cmd_def.get('measure', [0, 1]).filter(channels=[MeasureChannel(1)])), (60, Acquire(duration=10)([AcquireChannel(0), AcquireChannel(1)], [MemorySlot(0), MemorySlot(1)])))
self.assertEqual(sched.instructions, expected.instructions) |
def test_3q_schedule(self):
'Test a schedule that was recommended by David McKay :D '
backend = FakeOpenPulse3Q()
cmd_def = backend.defaults().build_cmd_def()
q = QuantumRegister(3)
c = ClassicalRegister(3)
qc = QuantumCircuit(q, c)
qc.cx(q[0], q[1])
qc.u2(0.778, 0.122, q[2])
qc.u3(3.14, 1.57, 0.0, q[0])
qc.u2(3.14, 1.57, q[1])
qc.cx(q[1], q[2])
qc.u2(0.778, 0.122, q[2])
sched = schedule(qc, backend)
expected = Schedule(cmd_def.get('cx', [0, 1]), (22, cmd_def.get('u2', [1], 3.14, 1.57)), (46, cmd_def.get('u2', [2], 0.778, 0.122)), (50, cmd_def.get('cx', [1, 2])), (72, cmd_def.get('u2', [2], 0.778, 0.122)), (74, cmd_def.get('u3', [0], 3.14, 1.57, 0.0)))
self.assertEqual(sched.instructions, expected.instructions) | -2,168,647,660,574,186,000 | Test a schedule that was recommended by David McKay :D | artifacts/old_dataset_versions/minimal_commits/qiskit-terra/qiskit-terra#2704/after/test_basic_scheduler.py | test_3q_schedule | MattePalte/Bugs-Quantum-Computing-Platforms | python | def test_3q_schedule(self):
' '
backend = FakeOpenPulse3Q()
cmd_def = backend.defaults().build_cmd_def()
q = QuantumRegister(3)
c = ClassicalRegister(3)
qc = QuantumCircuit(q, c)
qc.cx(q[0], q[1])
qc.u2(0.778, 0.122, q[2])
qc.u3(3.14, 1.57, 0.0, q[0])
qc.u2(3.14, 1.57, q[1])
qc.cx(q[1], q[2])
qc.u2(0.778, 0.122, q[2])
sched = schedule(qc, backend)
expected = Schedule(cmd_def.get('cx', [0, 1]), (22, cmd_def.get('u2', [1], 3.14, 1.57)), (46, cmd_def.get('u2', [2], 0.778, 0.122)), (50, cmd_def.get('cx', [1, 2])), (72, cmd_def.get('u2', [2], 0.778, 0.122)), (74, cmd_def.get('u3', [0], 3.14, 1.57, 0.0)))
self.assertEqual(sched.instructions, expected.instructions) |
def test_schedule_multi(self):
'Test scheduling multiple circuits at once.'
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc0 = QuantumCircuit(q, c)
qc0.cx(q[0], q[1])
qc1 = QuantumCircuit(q, c)
qc1.cx(q[0], q[1])
schedules = schedule([qc0, qc1], self.backend)
expected_insts = schedule(qc0, self.backend).instructions
self.assertEqual(schedules[0].instructions, expected_insts) | 2,331,117,832,550,685,000 | Test scheduling multiple circuits at once. | artifacts/old_dataset_versions/minimal_commits/qiskit-terra/qiskit-terra#2704/after/test_basic_scheduler.py | test_schedule_multi | MattePalte/Bugs-Quantum-Computing-Platforms | python | def test_schedule_multi(self):
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc0 = QuantumCircuit(q, c)
qc0.cx(q[0], q[1])
qc1 = QuantumCircuit(q, c)
qc1.cx(q[0], q[1])
schedules = schedule([qc0, qc1], self.backend)
expected_insts = schedule(qc0, self.backend).instructions
self.assertEqual(schedules[0].instructions, expected_insts) |
def test_circuit_name_kept(self):
'Test that the new schedule gets its name from the circuit.'
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c, name='CIRCNAME')
qc.cx(q[0], q[1])
sched = schedule(qc, self.backend, method='asap')
self.assertEqual(sched.name, qc.name)
sched = schedule(qc, self.backend, method='alap')
self.assertEqual(sched.name, qc.name) | 4,632,417,592,669,203,000 | Test that the new schedule gets its name from the circuit. | artifacts/old_dataset_versions/minimal_commits/qiskit-terra/qiskit-terra#2704/after/test_basic_scheduler.py | test_circuit_name_kept | MattePalte/Bugs-Quantum-Computing-Platforms | python | def test_circuit_name_kept(self):
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c, name='CIRCNAME')
qc.cx(q[0], q[1])
sched = schedule(qc, self.backend, method='asap')
self.assertEqual(sched.name, qc.name)
sched = schedule(qc, self.backend, method='alap')
self.assertEqual(sched.name, qc.name) |
def test_can_add_gates_into_free_space(self):
'The scheduler does some time bookkeeping to know when qubits are free to be\n scheduled. Make sure this works for qubits that are used in the future. This was\n a bug, uncovered by this example:\n\n q0 = - - - - |X|\n q1 = |X| |u2| |X|\n\n In ALAP scheduling, the next operation on qubit 0 would be added at t=0 rather\n than immediately before the X gate.\n '
qr = QuantumRegister(2)
qc = QuantumCircuit(qr)
for i in range(2):
qc.u2(0, 0, [qr[i]])
qc.u1(3.14, [qr[i]])
qc.u2(0, 0, [qr[i]])
sched = schedule(qc, self.backend, method='alap')
expected = Schedule(self.cmd_def.get('u2', [0], 0, 0), self.cmd_def.get('u2', [1], 0, 0), (28, self.cmd_def.get('u1', [0], 3.14)), (28, self.cmd_def.get('u1', [1], 3.14)), (28, self.cmd_def.get('u2', [0], 0, 0)), (28, self.cmd_def.get('u2', [1], 0, 0)))
self.assertEqual(sched.instructions, expected.instructions) | -2,730,611,971,349,152,300 | The scheduler does some time bookkeeping to know when qubits are free to be
scheduled. Make sure this works for qubits that are used in the future. This was
a bug, uncovered by this example:
q0 = - - - - |X|
q1 = |X| |u2| |X|
In ALAP scheduling, the next operation on qubit 0 would be added at t=0 rather
than immediately before the X gate. | artifacts/old_dataset_versions/minimal_commits/qiskit-terra/qiskit-terra#2704/after/test_basic_scheduler.py | test_can_add_gates_into_free_space | MattePalte/Bugs-Quantum-Computing-Platforms | python | def test_can_add_gates_into_free_space(self):
'The scheduler does some time bookkeeping to know when qubits are free to be\n scheduled. Make sure this works for qubits that are used in the future. This was\n a bug, uncovered by this example:\n\n q0 = - - - - |X|\n q1 = |X| |u2| |X|\n\n In ALAP scheduling, the next operation on qubit 0 would be added at t=0 rather\n than immediately before the X gate.\n '
qr = QuantumRegister(2)
qc = QuantumCircuit(qr)
for i in range(2):
qc.u2(0, 0, [qr[i]])
qc.u1(3.14, [qr[i]])
qc.u2(0, 0, [qr[i]])
sched = schedule(qc, self.backend, method='alap')
expected = Schedule(self.cmd_def.get('u2', [0], 0, 0), self.cmd_def.get('u2', [1], 0, 0), (28, self.cmd_def.get('u1', [0], 3.14)), (28, self.cmd_def.get('u1', [1], 3.14)), (28, self.cmd_def.get('u2', [0], 0, 0)), (28, self.cmd_def.get('u2', [1], 0, 0)))
self.assertEqual(sched.instructions, expected.instructions) |
def test_barriers_in_middle(self):
'As a follow on to `test_can_add_gates_into_free_space`, similar issues\n arose for barriers, specifically.\n '
qr = QuantumRegister(2)
qc = QuantumCircuit(qr)
for i in range(2):
qc.u2(0, 0, [qr[i]])
qc.barrier(qr[i])
qc.u1(3.14, [qr[i]])
qc.barrier(qr[i])
qc.u2(0, 0, [qr[i]])
sched = schedule(qc, self.backend, method='alap')
expected = Schedule(self.cmd_def.get('u2', [0], 0, 0), self.cmd_def.get('u2', [1], 0, 0), (28, self.cmd_def.get('u1', [0], 3.14)), (28, self.cmd_def.get('u1', [1], 3.14)), (28, self.cmd_def.get('u2', [0], 0, 0)), (28, self.cmd_def.get('u2', [1], 0, 0)))
self.assertEqual(sched.instructions, expected.instructions) | -3,675,416,939,452,719,600 | As a follow on to `test_can_add_gates_into_free_space`, similar issues
arose for barriers, specifically. | artifacts/old_dataset_versions/minimal_commits/qiskit-terra/qiskit-terra#2704/after/test_basic_scheduler.py | test_barriers_in_middle | MattePalte/Bugs-Quantum-Computing-Platforms | python | def test_barriers_in_middle(self):
'As a follow on to `test_can_add_gates_into_free_space`, similar issues\n arose for barriers, specifically.\n '
qr = QuantumRegister(2)
qc = QuantumCircuit(qr)
for i in range(2):
qc.u2(0, 0, [qr[i]])
qc.barrier(qr[i])
qc.u1(3.14, [qr[i]])
qc.barrier(qr[i])
qc.u2(0, 0, [qr[i]])
sched = schedule(qc, self.backend, method='alap')
expected = Schedule(self.cmd_def.get('u2', [0], 0, 0), self.cmd_def.get('u2', [1], 0, 0), (28, self.cmd_def.get('u1', [0], 3.14)), (28, self.cmd_def.get('u1', [1], 3.14)), (28, self.cmd_def.get('u2', [0], 0, 0)), (28, self.cmd_def.get('u2', [1], 0, 0)))
self.assertEqual(sched.instructions, expected.instructions) |
def test_only_needed_measures(self):
'Test that `MeasureChannel`s are only added for measured qubits.'
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.measure(q[1], c[1])
sched_all_channels = schedule(qc, self.backend, method='as_soon_as_possible').channels
deleted_channels = [MeasureChannel(0)]
for chan in deleted_channels:
    self.assertNotIn(chan, sched_all_channels)
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.measure(q[1], c[1])
sched_all_channels = schedule(qc, self.backend, method='as_soon_as_possible').channels
deleted_channels = [MeasureChannel(0)]
for chan in deleted_channels:
    self.assertNotIn(chan, sched_all_channels)
def test_user_mapping_for_memslots(self):
'\n Test that the new schedule only has required `MeasureChannel`s and that the\n `MemorySlot`s are mapped according to the input circuit.\n '
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.measure(q[0], c[1])
sched = schedule(qc, self.backend)
expected = Schedule(self.cmd_def.get('measure', [0, 1]).filter(channels=[MeasureChannel(0)]), Acquire(duration=10)([AcquireChannel(0), AcquireChannel(1)], [MemorySlot(1), MemorySlot(0)]))
self.assertEqual(sched.instructions, expected.instructions) | -4,664,338,327,747,616,000 | Test that the new schedule only has required `MeasureChannel`s and that the
`MemorySlot`s are mapped according to the input circuit. | artifacts/old_dataset_versions/minimal_commits/qiskit-terra/qiskit-terra#2704/after/test_basic_scheduler.py | test_user_mapping_for_memslots | MattePalte/Bugs-Quantum-Computing-Platforms | python | def test_user_mapping_for_memslots(self):
'\n Test that the new schedule only has required `MeasureChannel`s and that the\n `MemorySlot`s are mapped according to the input circuit.\n '
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.measure(q[0], c[1])
sched = schedule(qc, self.backend)
expected = Schedule(self.cmd_def.get('measure', [0, 1]).filter(channels=[MeasureChannel(0)]), Acquire(duration=10)([AcquireChannel(0), AcquireChannel(1)], [MemorySlot(1), MemorySlot(0)]))
self.assertEqual(sched.instructions, expected.instructions) |
def test_user_mapping_for_memslots_3Q(self):
'Test measuring two of three qubits.'
backend = FakeOpenPulse3Q()
cmd_def = backend.defaults().build_cmd_def()
q = QuantumRegister(3)
c = ClassicalRegister(3)
qc = QuantumCircuit(q, c)
qc.measure(q[1], c[2])
qc.measure(q[2], c[0])
sched = schedule(qc, backend)
expected = Schedule(cmd_def.get('measure', [0, 1, 2]).filter(channels=[MeasureChannel(1), MeasureChannel(2)]), Acquire(duration=10)([AcquireChannel(0), AcquireChannel(1), AcquireChannel(2)], [MemorySlot(1), MemorySlot(2), MemorySlot(0)]))
self.assertEqual(sched.instructions, expected.instructions) | 5,272,640,111,508,526,000 | Test measuring two of three qubits. | artifacts/old_dataset_versions/minimal_commits/qiskit-terra/qiskit-terra#2704/after/test_basic_scheduler.py | test_user_mapping_for_memslots_3Q | MattePalte/Bugs-Quantum-Computing-Platforms | python | def test_user_mapping_for_memslots_3Q(self):
backend = FakeOpenPulse3Q()
cmd_def = backend.defaults().build_cmd_def()
q = QuantumRegister(3)
c = ClassicalRegister(3)
qc = QuantumCircuit(q, c)
qc.measure(q[1], c[2])
qc.measure(q[2], c[0])
sched = schedule(qc, backend)
expected = Schedule(cmd_def.get('measure', [0, 1, 2]).filter(channels=[MeasureChannel(1), MeasureChannel(2)]), Acquire(duration=10)([AcquireChannel(0), AcquireChannel(1), AcquireChannel(2)], [MemorySlot(1), MemorySlot(2), MemorySlot(0)]))
self.assertEqual(sched.instructions, expected.instructions) |
def test_multiple_measure_in_3Q(self):
'Test multiple measure, user memslot mapping, 3Q.'
backend = FakeOpenPulse3Q()
cmd_def = backend.defaults().build_cmd_def()
q = QuantumRegister(3)
c = ClassicalRegister(5)
qc = QuantumCircuit(q, c)
qc.measure(q[0], c[2])
qc.measure(q[0], c[4])
sched = schedule(qc, backend)
expected = Schedule(cmd_def.get('measure', [0, 1, 2]).filter(channels=[MeasureChannel(0)]), Acquire(duration=10)([AcquireChannel(0), AcquireChannel(1), AcquireChannel(2)], [MemorySlot(2), MemorySlot(0), MemorySlot(1)]), (10, cmd_def.get('measure', [0, 1, 2]).filter(channels=[MeasureChannel(0)])), (10, Acquire(duration=10)([AcquireChannel(0), AcquireChannel(1), AcquireChannel(2)], [MemorySlot(4), MemorySlot(0), MemorySlot(1)])))
self.assertEqual(sched.instructions, expected.instructions) | 2,211,987,329,034,881,300 | Test multiple measure, user memslot mapping, 3Q. | artifacts/old_dataset_versions/minimal_commits/qiskit-terra/qiskit-terra#2704/after/test_basic_scheduler.py | test_multiple_measure_in_3Q | MattePalte/Bugs-Quantum-Computing-Platforms | python | def test_multiple_measure_in_3Q(self):
backend = FakeOpenPulse3Q()
cmd_def = backend.defaults().build_cmd_def()
q = QuantumRegister(3)
c = ClassicalRegister(5)
qc = QuantumCircuit(q, c)
qc.measure(q[0], c[2])
qc.measure(q[0], c[4])
sched = schedule(qc, backend)
expected = Schedule(cmd_def.get('measure', [0, 1, 2]).filter(channels=[MeasureChannel(0)]), Acquire(duration=10)([AcquireChannel(0), AcquireChannel(1), AcquireChannel(2)], [MemorySlot(2), MemorySlot(0), MemorySlot(1)]), (10, cmd_def.get('measure', [0, 1, 2]).filter(channels=[MeasureChannel(0)])), (10, Acquire(duration=10)([AcquireChannel(0), AcquireChannel(1), AcquireChannel(2)], [MemorySlot(4), MemorySlot(0), MemorySlot(1)])))
self.assertEqual(sched.instructions, expected.instructions) |
def dav_index(context, data):
'List files in a WebDAV directory.'
url = data.get('url')
context.log.info(('Fetching WebDAV path: %s' % url))
result = context.http.request('PROPFIND', url)
for resp in result.xml.findall('./{DAV:}response'):
href = resp.findtext('./{DAV:}href')
if (href is None):
continue
child_url = urljoin(url, href)
if (child_url == url):
continue
child = dict(data)
child['url'] = child_url
child['foreign_id'] = child_url
child['file_name'] = _get_url_file_name(href)
rule = 'file'
if (resp.find('.//{DAV:}collection') is not None):
rule = 'folder'
context.emit(data=child, rule=rule) | 8,402,320,049,363,877,000 | List files in a WebDAV directory. | memorious/operations/webdav.py | dav_index | Rosencrantz/memorious | python | def dav_index(context, data):
url = data.get('url')
context.log.info(('Fetching WebDAV path: %s' % url))
result = context.http.request('PROPFIND', url)
for resp in result.xml.findall('./{DAV:}response'):
href = resp.findtext('./{DAV:}href')
if (href is None):
continue
child_url = urljoin(url, href)
if (child_url == url):
continue
child = dict(data)
child['url'] = child_url
child['foreign_id'] = child_url
child['file_name'] = _get_url_file_name(href)
rule = 'file'
if (resp.find('.//{DAV:}collection') is not None):
rule = 'folder'
context.emit(data=child, rule=rule) |
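dav_index above leans on WebDAV's PROPFIND method, which returns a multistatus XML document with one {DAV:}response element per directory entry; entries whose resourcetype contains {DAV:}collection are folders. A standalone sketch of the same listing logic using requests and xml.etree.ElementTree, with a placeholder URL (memorious' context.http wrapper is assumed to do roughly this, plus caching and auth):

# Standalone sketch of a WebDAV directory listing via PROPFIND.
# The URL is a placeholder; real servers may also require auth and a Depth header.
from urllib.parse import urljoin
import xml.etree.ElementTree as ET
import requests

url = 'https://example.com/dav/'
resp = requests.request('PROPFIND', url, headers={'Depth': '1'})
root = ET.fromstring(resp.content)
for response in root.findall('./{DAV:}response'):
    href = response.findtext('./{DAV:}href')
    if href is None:
        continue
    child_url = urljoin(url, href)
    if child_url == url:  # the listing includes the directory itself; skip it
        continue
    is_dir = response.find('.//{DAV:}collection') is not None
    print('folder' if is_dir else 'file', child_url)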
def default_data_collator(features: List[InputDataClass]) -> Dict[(str, torch.Tensor)]:
"\n Very simple data collator that simply collates batches of dict-like objects and performs special handling for\n potential keys named:\n\n - ``label``: handles a single value (int or float) per object\n - ``label_ids``: handles a list of values per object\n\n Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs\n to the model. See glue and ner for example of how it's useful.\n "
if (not isinstance(features[0], (dict, BatchEncoding))):
features = [vars(f) for f in features]
first = features[0]
batch = {}
if (('label' in first) and (first['label'] is not None)):
label = (first['label'].item() if isinstance(first['label'], torch.Tensor) else first['label'])
dtype = (torch.long if isinstance(label, int) else torch.float)
batch['labels'] = torch.tensor([f['label'] for f in features], dtype=dtype)
elif (('label_ids' in first) and (first['label_ids'] is not None)):
if isinstance(first['label_ids'], torch.Tensor):
batch['labels'] = torch.stack([f['label_ids'] for f in features])
else:
dtype = (torch.long if (type(first['label_ids'][0]) is int) else torch.float)
batch['labels'] = torch.tensor([f['label_ids'] for f in features], dtype=dtype)
for (k, v) in first.items():
if ((k not in ('label', 'label_ids')) and (v is not None) and (not isinstance(v, str))):
if isinstance(v, torch.Tensor):
batch[k] = torch.stack([f[k] for f in features])
else:
batch[k] = torch.tensor([f[k] for f in features])
return batch | -5,154,268,821,003,138,000 | Very simple data collator that simply collates batches of dict-like objects and performs special handling for
potential keys named:
- ``label``: handles a single value (int or float) per object
- ``label_ids``: handles a list of values per object
Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
to the model. See glue and ner for example of how it's useful. | src/transformers/data/data_collator.py | default_data_collator | 21jun/transformers | python | def default_data_collator(features: List[InputDataClass]) -> Dict[(str, torch.Tensor)]:
"\n Very simple data collator that simply collates batches of dict-like objects and performs special handling for\n potential keys named:\n\n - ``label``: handles a single value (int or float) per object\n - ``label_ids``: handles a list of values per object\n\n Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs\n to the model. See glue and ner for example of how it's useful.\n "
if (not isinstance(features[0], (dict, BatchEncoding))):
features = [vars(f) for f in features]
first = features[0]
batch = {}
if (('label' in first) and (first['label'] is not None)):
label = (first['label'].item() if isinstance(first['label'], torch.Tensor) else first['label'])
dtype = (torch.long if isinstance(label, int) else torch.float)
batch['labels'] = torch.tensor([f['label'] for f in features], dtype=dtype)
elif (('label_ids' in first) and (first['label_ids'] is not None)):
if isinstance(first['label_ids'], torch.Tensor):
batch['labels'] = torch.stack([f['label_ids'] for f in features])
else:
dtype = (torch.long if (type(first['label_ids'][0]) is int) else torch.float)
batch['labels'] = torch.tensor([f['label_ids'] for f in features], dtype=dtype)
for (k, v) in first.items():
if ((k not in ('label', 'label_ids')) and (v is not None) and (not isinstance(v, str))):
if isinstance(v, torch.Tensor):
batch[k] = torch.stack([f[k] for f in features])
else:
batch[k] = torch.tensor([f[k] for f in features])
return batch |
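A small usage sketch of default_data_collator with made-up toy features: list-valued fields are stacked into torch.long tensors, and the label key is renamed to labels:

# Toy usage of default_data_collator; the feature values are invented.
from transformers.data.data_collator import default_data_collator

features = [
    {'input_ids': [101, 7592, 102], 'attention_mask': [1, 1, 1], 'label': 0},
    {'input_ids': [101, 2088, 102], 'attention_mask': [1, 1, 1], 'label': 1},
]
batch = default_data_collator(features)
print(batch['labels'])           # tensor([0, 1])
print(batch['input_ids'].shape)  # torch.Size([2, 3])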
def _collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int]=None):
'Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary.'
if isinstance(examples[0], (list, tuple)):
examples = [torch.tensor(e, dtype=torch.long) for e in examples]
length_of_first = examples[0].size(0)
are_tensors_same_length = all(((x.size(0) == length_of_first) for x in examples))
if (are_tensors_same_length and ((pad_to_multiple_of is None) or ((length_of_first % pad_to_multiple_of) == 0))):
return torch.stack(examples, dim=0)
if (tokenizer._pad_token is None):
raise ValueError(f'You are attempting to pad samples but the tokenizer you are using ({tokenizer.__class__.__name__}) does not have a pad token.')
max_length = max((x.size(0) for x in examples))
if ((pad_to_multiple_of is not None) and ((max_length % pad_to_multiple_of) != 0)):
max_length = (((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of)
result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
for (i, example) in enumerate(examples):
if (tokenizer.padding_side == 'right'):
result[i, :example.shape[0]] = example
else:
result[i, (- example.shape[0]):] = example
return result | 9,111,441,324,026,435,000 | Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary. | src/transformers/data/data_collator.py | _collate_batch | 21jun/transformers | python | def _collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int]=None):
if isinstance(examples[0], (list, tuple)):
examples = [torch.tensor(e, dtype=torch.long) for e in examples]
length_of_first = examples[0].size(0)
are_tensors_same_length = all(((x.size(0) == length_of_first) for x in examples))
if (are_tensors_same_length and ((pad_to_multiple_of is None) or ((length_of_first % pad_to_multiple_of) == 0))):
return torch.stack(examples, dim=0)
if (tokenizer._pad_token is None):
raise ValueError(f'You are attempting to pad samples but the tokenizer you are using ({tokenizer.__class__.__name__}) does not have a pad token.')
max_length = max((x.size(0) for x in examples))
if ((pad_to_multiple_of is not None) and ((max_length % pad_to_multiple_of) != 0)):
max_length = (((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of)
result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
for (i, example) in enumerate(examples):
if (tokenizer.padding_side == 'right'):
result[i, :example.shape[0]] = example
else:
result[i, (- example.shape[0]):] = example
return result |
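The pad_to_multiple_of rounding in _collate_batch bumps max_length up to the next multiple only when it is not already one, so a batch whose longest example is 13 tokens is padded to 16 when pad_to_multiple_of=8. A pure-Python restatement of just that arithmetic:

# Sanity check of the pad_to_multiple_of rounding used in _collate_batch.
def round_up(max_length: int, multiple: int) -> int:
    if max_length % multiple != 0:
        max_length = ((max_length // multiple) + 1) * multiple
    return max_length

assert round_up(13, 8) == 16
assert round_up(16, 8) == 16  # already a multiple, so unchanged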
def mask_tokens(self, inputs: torch.Tensor, special_tokens_mask: Optional[torch.Tensor]=None) -> Tuple[(torch.Tensor, torch.Tensor)]:
'\n Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.\n '
labels = inputs.clone()
probability_matrix = torch.full(labels.shape, self.mlm_probability)
if (special_tokens_mask is None):
special_tokens_mask = [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()]
special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool)
else:
special_tokens_mask = special_tokens_mask.bool()
probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
labels[(~ masked_indices)] = (- 100)
indices_replaced = (torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices)
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
indices_random = ((torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices) & (~ indices_replaced))
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
return (inputs, labels) | -6,466,449,275,945,545,000 | Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. | src/transformers/data/data_collator.py | mask_tokens | 21jun/transformers | python | def mask_tokens(self, inputs: torch.Tensor, special_tokens_mask: Optional[torch.Tensor]=None) -> Tuple[(torch.Tensor, torch.Tensor)]:
'\n \n '
labels = inputs.clone()
probability_matrix = torch.full(labels.shape, self.mlm_probability)
if (special_tokens_mask is None):
special_tokens_mask = [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()]
special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool)
else:
special_tokens_mask = special_tokens_mask.bool()
probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
labels[(~ masked_indices)] = (- 100)
indices_replaced = (torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices)
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
indices_random = ((torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices) & (~ indices_replaced))
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
return (inputs, labels) |
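The 80/10/10 split in mask_tokens falls out of the two nested Bernoulli draws: 15% of tokens are selected, 80% of those become [MASK], and a fair coin splits the remaining 20% between random replacement and leaving the token as-is (all selected tokens keep their label either way). The per-token expectations:

# Expected per-token outcome probabilities for the masking scheme above.
mlm_probability = 0.15
p_mask = mlm_probability * 0.8                 # -> [MASK]:       0.12
p_random = mlm_probability * (1 - 0.8) * 0.5   # -> random token: 0.015
p_keep = mlm_probability * (1 - 0.8) * 0.5     # -> unchanged:    0.015
assert abs((p_mask + p_random + p_keep) - mlm_probability) < 1e-12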
def _whole_word_mask(self, input_tokens: List[str], max_predictions=512):
'\n Get 0/1 labels for masked tokens with whole word mask proxy\n '
if (not isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast))):
warnings.warn('DataCollatorForWholeWordMask is only suitable for BertTokenizer-like tokenizers. Please refer to the documentation for more information.')
cand_indexes = []
for (i, token) in enumerate(input_tokens):
if ((token == '[CLS]') or (token == '[SEP]')):
continue
if ((len(cand_indexes) >= 1) and token.startswith('##')):
cand_indexes[(- 1)].append(i)
else:
cand_indexes.append([i])
random.shuffle(cand_indexes)
num_to_predict = min(max_predictions, max(1, int(round((len(input_tokens) * self.mlm_probability)))))
masked_lms = []
covered_indexes = set()
for index_set in cand_indexes:
if (len(masked_lms) >= num_to_predict):
break
if ((len(masked_lms) + len(index_set)) > num_to_predict):
continue
is_any_index_covered = False
for index in index_set:
if (index in covered_indexes):
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
masked_lms.append(index)
assert (len(covered_indexes) == len(masked_lms))
mask_labels = [(1 if (i in covered_indexes) else 0) for i in range(len(input_tokens))]
return mask_labels | -4,265,430,114,033,264,000 | Get 0/1 labels for masked tokens with whole word mask proxy | src/transformers/data/data_collator.py | _whole_word_mask | 21jun/transformers | python | def _whole_word_mask(self, input_tokens: List[str], max_predictions=512):
'\n \n '
if (not isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast))):
warnings.warn('DataCollatorForWholeWordMask is only suitable for BertTokenizer-like tokenizers. Please refer to the documentation for more information.')
cand_indexes = []
for (i, token) in enumerate(input_tokens):
if ((token == '[CLS]') or (token == '[SEP]')):
continue
if ((len(cand_indexes) >= 1) and token.startswith('##')):
cand_indexes[(- 1)].append(i)
else:
cand_indexes.append([i])
random.shuffle(cand_indexes)
num_to_predict = min(max_predictions, max(1, int(round((len(input_tokens) * self.mlm_probability)))))
masked_lms = []
covered_indexes = set()
for index_set in cand_indexes:
if (len(masked_lms) >= num_to_predict):
break
if ((len(masked_lms) + len(index_set)) > num_to_predict):
continue
is_any_index_covered = False
for index in index_set:
if (index in covered_indexes):
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
masked_lms.append(index)
assert (len(covered_indexes) == len(masked_lms))
mask_labels = [(1 if (i in covered_indexes) else 0) for i in range(len(input_tokens))]
return mask_labels |
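The '##' prefix check in _whole_word_mask is what glues WordPiece subtokens back into whole-word masking candidates. A standalone sketch of just the grouping step on a toy token list:

# Standalone sketch of the whole-word candidate grouping (toy tokens).
input_tokens = ['[CLS]', 'un', '##divide', '##d', 'words', '[SEP]']
cand_indexes = []
for i, token in enumerate(input_tokens):
    if token in ('[CLS]', '[SEP]'):
        continue
    if cand_indexes and token.startswith('##'):
        cand_indexes[-1].append(i)   # continuation subtoken joins the current word
    else:
        cand_indexes.append([i])     # a new word starts here
print(cand_indexes)  # [[1, 2, 3], [4]]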
def mask_tokens(self, inputs: torch.Tensor, mask_labels: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor)]:
"\n Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set\n 'mask_labels' means we use whole word mask (wwm), we directly mask idxs according to it's ref.\n "
if (self.tokenizer.mask_token is None):
raise ValueError('This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer.')
labels = inputs.clone()
probability_matrix = mask_labels
special_tokens_mask = [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if (self.tokenizer._pad_token is not None):
padding_mask = labels.eq(self.tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = probability_matrix.bool()
labels[(~ masked_indices)] = (- 100)
indices_replaced = (torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices)
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
indices_random = ((torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices) & (~ indices_replaced))
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
return (inputs, labels) | -6,416,139,270,136,281,000 | Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set
'mask_labels' means we use whole word mask (wwm), we directly mask idxs according to it's ref. | src/transformers/data/data_collator.py | mask_tokens | 21jun/transformers | python | def mask_tokens(self, inputs: torch.Tensor, mask_labels: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor)]:
"\n Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set\n 'mask_labels' means we use whole word mask (wwm), we directly mask idxs according to it's ref.\n "
if (self.tokenizer.mask_token is None):
raise ValueError('This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer.')
labels = inputs.clone()
probability_matrix = mask_labels
special_tokens_mask = [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if (self.tokenizer._pad_token is not None):
padding_mask = labels.eq(self.tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = probability_matrix.bool()
labels[(~ masked_indices)] = (- 100)
indices_replaced = (torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices)
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
indices_random = ((torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices) & (~ indices_replaced))
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
return (inputs, labels) |
def mask_tokens(self, inputs: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor)]:
'\n Prepare masked tokens inputs/labels/attention_mask for masked language modeling: 80% MASK, 10% random, 10%\n original. N-gram not applied yet.\n '
if (self.tokenizer.mask_token is None):
raise ValueError('This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer.')
labels = inputs.clone()
probability_matrix = torch.full(labels.shape, self.mlm_probability)
special_tokens_mask = [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if (self.tokenizer._pad_token is not None):
padding_mask = labels.eq(self.tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
attention_mask = (~ masked_indices).float()
if (self.tokenizer._pad_token is not None):
attention_padding_mask = labels.eq(self.tokenizer.pad_token_id)
attention_mask.masked_fill_(attention_padding_mask, value=1.0)
labels[(~ masked_indices)] = (- 100)
indices_replaced = (torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices)
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
indices_random = ((torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices) & (~ indices_replaced))
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
return (inputs, labels, attention_mask) | 6,151,136,398,149,454,000 | Prepare masked tokens inputs/labels/attention_mask for masked language modeling: 80% MASK, 10% random, 10%
original. N-gram not applied yet. | src/transformers/data/data_collator.py | mask_tokens | 21jun/transformers | python | def mask_tokens(self, inputs: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor)]:
'\n Prepare masked tokens inputs/labels/attention_mask for masked language modeling: 80% MASK, 10% random, 10%\n original. N-gram not applied yet.\n '
if (self.tokenizer.mask_token is None):
raise ValueError('This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer.')
labels = inputs.clone()
probability_matrix = torch.full(labels.shape, self.mlm_probability)
special_tokens_mask = [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if (self.tokenizer._pad_token is not None):
padding_mask = labels.eq(self.tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
attention_mask = (~ masked_indices).float()
if (self.tokenizer._pad_token is not None):
attention_padding_mask = labels.eq(self.tokenizer.pad_token_id)
attention_mask.masked_fill_(attention_padding_mask, value=1.0)
labels[(~ masked_indices)] = (- 100)
indices_replaced = (torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices)
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
indices_random = ((torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices) & (~ indices_replaced))
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
return (inputs, labels, attention_mask) |
def mask_tokens(self, inputs: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor)]:
'\n The masked tokens to be predicted for a particular sequence are determined by the following algorithm:\n\n 0. Start from the beginning of the sequence by setting ``cur_len = 0`` (number of tokens processed so far).\n 1. Sample a ``span_length`` from the interval ``[1, max_span_length]`` (length of span of tokens to be\n masked)\n 2. Reserve a context of length ``context_length = span_length / plm_probability`` to surround span to be\n masked\n 3. Sample a starting point ``start_index`` from the interval ``[cur_len, cur_len + context_length -\n span_length]`` and mask tokens ``start_index:start_index + span_length``\n 4. Set ``cur_len = cur_len + context_length``. If ``cur_len < max_len`` (i.e. there are tokens remaining in\n the sequence to be processed), repeat from Step 1.\n '
if (self.tokenizer.mask_token is None):
raise ValueError('This tokenizer does not have a mask token which is necessary for permutation language modeling. Please add a mask token if you want to use this tokenizer.')
if ((inputs.size(1) % 2) != 0):
raise ValueError('This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see relevant comments in source code for details.')
labels = inputs.clone()
masked_indices = torch.full(labels.shape, 0, dtype=torch.bool)
target_mapping = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
for i in range(labels.size(0)):
cur_len = 0
max_len = labels.size(1)
while (cur_len < max_len):
span_length = torch.randint(1, (self.max_span_length + 1), (1,)).item()
context_length = int((span_length / self.plm_probability))
start_index = (cur_len + torch.randint(((context_length - span_length) + 1), (1,)).item())
masked_indices[i, start_index:(start_index + span_length)] = 1
cur_len += context_length
target_mapping[i] = torch.eye(labels.size(1))
special_tokens_mask = torch.tensor([self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()], dtype=torch.bool)
masked_indices.masked_fill_(special_tokens_mask, value=0.0)
if (self.tokenizer._pad_token is not None):
padding_mask = labels.eq(self.tokenizer.pad_token_id)
masked_indices.masked_fill_(padding_mask, value=0.0)
non_func_mask = (~ (padding_mask | special_tokens_mask))
inputs[masked_indices] = self.tokenizer.mask_token_id
labels[(~ masked_indices)] = (- 100)
perm_mask = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
for i in range(labels.size(0)):
perm_index = torch.arange(labels.size(1))
perm_index = perm_index.reshape(((- 1), (labels.size(1) // 2))).transpose(0, 1)
perm_index = perm_index[torch.randperm((labels.size(1) // 2))]
perm_index = torch.flatten(perm_index.transpose(0, 1))
perm_index.masked_fill_(((~ masked_indices[i]) & non_func_mask[i]), (- 1))
perm_mask[i] = ((perm_index.reshape((labels.size(1), 1)) <= perm_index.reshape((1, labels.size(1)))) & masked_indices[i])
return (inputs.long(), perm_mask, target_mapping, labels.long()) | -8,821,713,950,774,863,000 | The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
0. Start from the beginning of the sequence by setting ``cur_len = 0`` (number of tokens processed so far).
1. Sample a ``span_length`` from the interval ``[1, max_span_length]`` (length of span of tokens to be
masked)
2. Reserve a context of length ``context_length = span_length / plm_probability`` to surround span to be
masked
3. Sample a starting point ``start_index`` from the interval ``[cur_len, cur_len + context_length -
span_length]`` and mask tokens ``start_index:start_index + span_length``
4. Set ``cur_len = cur_len + context_length``. If ``cur_len < max_len`` (i.e. there are tokens remaining in
the sequence to be processed), repeat from Step 1. | src/transformers/data/data_collator.py | mask_tokens | 21jun/transformers | python | def mask_tokens(self, inputs: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor)]:
'\n The masked tokens to be predicted for a particular sequence are determined by the following algorithm:\n\n 0. Start from the beginning of the sequence by setting ``cur_len = 0`` (number of tokens processed so far).\n 1. Sample a ``span_length`` from the interval ``[1, max_span_length]`` (length of span of tokens to be\n masked)\n 2. Reserve a context of length ``context_length = span_length / plm_probability`` to surround span to be\n masked\n 3. Sample a starting point ``start_index`` from the interval ``[cur_len, cur_len + context_length -\n span_length]`` and mask tokens ``start_index:start_index + span_length``\n 4. Set ``cur_len = cur_len + context_length``. If ``cur_len < max_len`` (i.e. there are tokens remaining in\n the sequence to be processed), repeat from Step 1.\n '
if (self.tokenizer.mask_token is None):
raise ValueError('This tokenizer does not have a mask token which is necessary for permutation language modeling. Please add a mask token if you want to use this tokenizer.')
if ((inputs.size(1) % 2) != 0):
raise ValueError('This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see relevant comments in source code for details.')
labels = inputs.clone()
masked_indices = torch.full(labels.shape, 0, dtype=torch.bool)
target_mapping = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
for i in range(labels.size(0)):
cur_len = 0
max_len = labels.size(1)
while (cur_len < max_len):
span_length = torch.randint(1, (self.max_span_length + 1), (1,)).item()
context_length = int((span_length / self.plm_probability))
start_index = (cur_len + torch.randint(((context_length - span_length) + 1), (1,)).item())
masked_indices[i, start_index:(start_index + span_length)] = 1
cur_len += context_length
target_mapping[i] = torch.eye(labels.size(1))
special_tokens_mask = torch.tensor([self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()], dtype=torch.bool)
masked_indices.masked_fill_(special_tokens_mask, value=0.0)
if (self.tokenizer._pad_token is not None):
padding_mask = labels.eq(self.tokenizer.pad_token_id)
masked_indices.masked_fill_(padding_mask, value=0.0)
non_func_mask = (~ (padding_mask | special_tokens_mask))
inputs[masked_indices] = self.tokenizer.mask_token_id
labels[(~ masked_indices)] = (- 100)
perm_mask = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
for i in range(labels.size(0)):
perm_index = torch.arange(labels.size(1))
perm_index = perm_index.reshape(((- 1), (labels.size(1) // 2))).transpose(0, 1)
perm_index = perm_index[torch.randperm((labels.size(1) // 2))]
perm_index = torch.flatten(perm_index.transpose(0, 1))
perm_index.masked_fill_(((~ masked_indices[i]) & non_func_mask[i]), (- 1))
perm_mask[i] = ((perm_index.reshape((labels.size(1), 1)) <= perm_index.reshape((1, labels.size(1)))) & masked_indices[i])
return (inputs.long(), perm_mask, target_mapping, labels.long()) |
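A toy trace of the span-sampling loop described in the docstring above (steps 0-4), using plain Python ints in place of torch.randint and the defaults of DataCollatorForPermutationLanguageModeling (plm_probability=1/6, max_span_length=5); boundary effects aside, about one token in six ends up masked:

# Toy trace of the PLM span sampling; random.randint stands in for torch.randint.
import random

plm_probability = 1 / 6
max_span_length = 5
max_len = 64
masked = [0] * max_len

cur_len = 0
while cur_len < max_len:
    span_length = random.randint(1, max_span_length)                          # step 1
    context_length = int(span_length / plm_probability)                       # step 2
    start_index = cur_len + random.randint(0, context_length - span_length)   # step 3
    for j in range(start_index, min(start_index + span_length, max_len)):
        masked[j] = 1
    cur_len += context_length                                                 # step 4

print(sum(masked) / max_len)  # roughly plm_probability in expectation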