body (string, 26-98.2k chars) | body_hash (int64) | docstring (string, 1-16.8k chars) | path (string, 5-230 chars) | name (string, 1-96 chars) | repository_name (string, 7-89 chars) | lang (string, 1 value: python) | body_without_docstring (string, 20-98.2k chars) |
---|---|---|---|---|---|---|---|
@Issue(4213)
@Skip(svntest.main.is_fs_type_fsx)
def recover_old_empty(sbox):
'recover empty --compatible-version=1.3'
sbox.build(create_wc=False, empty=True, minor_version=3)
svntest.actions.run_and_verify_svnadmin(None, [], 'recover', sbox.repo_dir) | -5,409,064,214,191,564,000 | recover empty --compatible-version=1.3 | subversion/tests/cmdline/svnadmin_tests.py | recover_old_empty | auycro/subversion | python | @Issue(4213)
@Skip(svntest.main.is_fs_type_fsx)
def recover_old_empty(sbox):
sbox.build(create_wc=False, empty=True, minor_version=3)
svntest.actions.run_and_verify_svnadmin(None, [], 'recover', sbox.repo_dir) |
@SkipUnless(svntest.main.is_fs_type_fsfs)
def verify_keep_going(sbox):
'svnadmin verify --keep-going test'
if svntest.main.options.fsfs_packing:
raise svntest.Skip('fsfs packing set')
sbox.build(create_wc=False)
repo_url = sbox.repo_url
B_url = (sbox.repo_url + '/B')
C_url = (sbox.repo_url + '/C')
svntest.actions.run_and_verify_svn(None, [], 'mkdir', '-m', 'log_msg', B_url)
svntest.actions.run_and_verify_svn(None, [], 'mkdir', '-m', 'log_msg', C_url)
r2 = fsfs_file(sbox.repo_dir, 'revs', '2')
fp = open(r2, 'r+b')
fp.write(b'inserting junk to corrupt the rev')
fp.close()
(exit_code, output, errput) = svntest.main.run_svnadmin('verify', '--keep-going', sbox.repo_dir)
exp_out = svntest.verify.RegexListOutput(['.*Verified revision 0.', '.*Verified revision 1.', '.*', '.*Summary.*', '.*r2: E160004:.*', '.*r2: E160004:.*', '.*r3: E160004:.*', '.*r3: E160004:.*'])
if svntest.main.fs_has_rep_sharing():
exp_out.insert(0, '.*Verifying.*metadata.*')
exp_err = svntest.verify.RegexListOutput(['.*Error verifying revision 2.', 'svnadmin: E160004:.*', 'svnadmin: E160004:.*', '.*Error verifying revision 3.', 'svnadmin: E160004:.*', 'svnadmin: E160004:.*', 'svnadmin: E205012:.*'], False)
if svntest.main.is_fs_log_addressing():
exp_err.insert(0, '.*Error verifying repository metadata.')
exp_err.insert(1, 'svnadmin: E160004:.*')
if svntest.verify.verify_outputs("Unexpected error while running 'svnadmin verify'.", output, errput, exp_out, exp_err):
raise svntest.Failure
(exit_code, output, errput) = svntest.main.run_svnadmin('verify', sbox.repo_dir)
if svntest.main.is_fs_log_addressing():
exp_out = svntest.verify.RegexListOutput(['.*Verifying metadata at revision 0.*'])
else:
exp_out = svntest.verify.RegexListOutput(['.*Verified revision 0.', '.*Verified revision 1.'])
if svntest.main.fs_has_rep_sharing():
exp_out.insert(0, '.*Verifying repository metadata.*')
if svntest.main.is_fs_log_addressing():
exp_err = svntest.verify.RegexListOutput(['.*Error verifying repository metadata.', 'svnadmin: E160004:.*'], False)
else:
exp_err = svntest.verify.RegexListOutput(['.*Error verifying revision 2.', 'svnadmin: E160004:.*', 'svnadmin: E160004:.*'], False)
if svntest.verify.verify_outputs("Unexpected error while running 'svnadmin verify'.", output, errput, exp_out, exp_err):
raise svntest.Failure
(exit_code, output, errput) = svntest.main.run_svnadmin('verify', '--quiet', sbox.repo_dir)
if svntest.main.is_fs_log_addressing():
exp_err = svntest.verify.RegexListOutput(['.*Error verifying repository metadata.', 'svnadmin: E160004:.*'], False)
else:
exp_err = svntest.verify.RegexListOutput(['.*Error verifying revision 2.', 'svnadmin: E160004:.*', 'svnadmin: E160004:.*'], False)
if svntest.verify.verify_outputs("Output of 'svnadmin verify' is unexpected.", None, errput, None, exp_err):
raise svntest.Failure
svntest.main.safe_rmtree(sbox.repo_dir, True) | -444,823,730,030,649,400 | svnadmin verify --keep-going test | subversion/tests/cmdline/svnadmin_tests.py | verify_keep_going | auycro/subversion | python | @SkipUnless(svntest.main.is_fs_type_fsfs)
def verify_keep_going(sbox):
if svntest.main.options.fsfs_packing:
raise svntest.Skip('fsfs packing set')
sbox.build(create_wc=False)
repo_url = sbox.repo_url
B_url = (sbox.repo_url + '/B')
C_url = (sbox.repo_url + '/C')
svntest.actions.run_and_verify_svn(None, [], 'mkdir', '-m', 'log_msg', B_url)
svntest.actions.run_and_verify_svn(None, [], 'mkdir', '-m', 'log_msg', C_url)
r2 = fsfs_file(sbox.repo_dir, 'revs', '2')
fp = open(r2, 'r+b')
fp.write(b'inserting junk to corrupt the rev')
fp.close()
(exit_code, output, errput) = svntest.main.run_svnadmin('verify', '--keep-going', sbox.repo_dir)
exp_out = svntest.verify.RegexListOutput(['.*Verified revision 0.', '.*Verified revision 1.', '.*', '.*Summary.*', '.*r2: E160004:.*', '.*r2: E160004:.*', '.*r3: E160004:.*', '.*r3: E160004:.*'])
if svntest.main.fs_has_rep_sharing():
exp_out.insert(0, '.*Verifying.*metadata.*')
exp_err = svntest.verify.RegexListOutput(['.*Error verifying revision 2.', 'svnadmin: E160004:.*', 'svnadmin: E160004:.*', '.*Error verifying revision 3.', 'svnadmin: E160004:.*', 'svnadmin: E160004:.*', 'svnadmin: E205012:.*'], False)
if svntest.main.is_fs_log_addressing():
exp_err.insert(0, '.*Error verifying repository metadata.')
exp_err.insert(1, 'svnadmin: E160004:.*')
if svntest.verify.verify_outputs("Unexpected error while running 'svnadmin verify'.", output, errput, exp_out, exp_err):
raise svntest.Failure
(exit_code, output, errput) = svntest.main.run_svnadmin('verify', sbox.repo_dir)
if svntest.main.is_fs_log_addressing():
exp_out = svntest.verify.RegexListOutput(['.*Verifying metadata at revision 0.*'])
else:
exp_out = svntest.verify.RegexListOutput(['.*Verified revision 0.', '.*Verified revision 1.'])
if svntest.main.fs_has_rep_sharing():
exp_out.insert(0, '.*Verifying repository metadata.*')
if svntest.main.is_fs_log_addressing():
exp_err = svntest.verify.RegexListOutput(['.*Error verifying repository metadata.', 'svnadmin: E160004:.*'], False)
else:
exp_err = svntest.verify.RegexListOutput(['.*Error verifying revision 2.', 'svnadmin: E160004:.*', 'svnadmin: E160004:.*'], False)
if svntest.verify.verify_outputs("Unexpected error while running 'svnadmin verify'.", output, errput, exp_out, exp_err):
raise svntest.Failure
(exit_code, output, errput) = svntest.main.run_svnadmin('verify', '--quiet', sbox.repo_dir)
if svntest.main.is_fs_log_addressing():
exp_err = svntest.verify.RegexListOutput(['.*Error verifying repository metadata.', 'svnadmin: E160004:.*'], False)
else:
exp_err = svntest.verify.RegexListOutput(['.*Error verifying revision 2.', 'svnadmin: E160004:.*', 'svnadmin: E160004:.*'], False)
if svntest.verify.verify_outputs("Output of 'svnadmin verify' is unexpected.", None, errput, None, exp_err):
raise svntest.Failure
svntest.main.safe_rmtree(sbox.repo_dir, True) |
@SkipUnless(svntest.main.is_fs_type_fsfs)
def verify_keep_going_quiet(sbox):
'svnadmin verify --keep-going --quiet test'
if svntest.main.options.fsfs_packing:
raise svntest.Skip('fsfs packing set')
sbox.build(create_wc=False)
repo_url = sbox.repo_url
B_url = (sbox.repo_url + '/B')
C_url = (sbox.repo_url + '/C')
svntest.actions.run_and_verify_svn(None, [], 'mkdir', '-m', 'log_msg', B_url)
svntest.actions.run_and_verify_svn(None, [], 'mkdir', '-m', 'log_msg', C_url)
r2 = fsfs_file(sbox.repo_dir, 'revs', '2')
fp = open(r2, 'r+b')
fp.write(b'inserting junk to corrupt the rev')
fp.close()
(exit_code, output, errput) = svntest.main.run_svnadmin('verify', '--keep-going', '--quiet', sbox.repo_dir)
exp_err = svntest.verify.RegexListOutput(['.*Error verifying revision 2.', 'svnadmin: E160004:.*', 'svnadmin: E160004:.*', '.*Error verifying revision 3.', 'svnadmin: E160004:.*', 'svnadmin: E160004:.*', 'svnadmin: E205012:.*'], False)
if svntest.main.is_fs_log_addressing():
exp_err.insert(0, '.*Error verifying repository metadata.')
exp_err.insert(1, 'svnadmin: E160004:.*')
if svntest.verify.verify_outputs("Unexpected error while running 'svnadmin verify'.", output, errput, None, exp_err):
raise svntest.Failure
svntest.main.safe_rmtree(sbox.repo_dir, True) | -1,586,240,796,637,758,200 | svnadmin verify --keep-going --quiet test | subversion/tests/cmdline/svnadmin_tests.py | verify_keep_going_quiet | auycro/subversion | python | @SkipUnless(svntest.main.is_fs_type_fsfs)
def verify_keep_going_quiet(sbox):
if svntest.main.options.fsfs_packing:
raise svntest.Skip('fsfs packing set')
sbox.build(create_wc=False)
repo_url = sbox.repo_url
B_url = (sbox.repo_url + '/B')
C_url = (sbox.repo_url + '/C')
svntest.actions.run_and_verify_svn(None, [], 'mkdir', '-m', 'log_msg', B_url)
svntest.actions.run_and_verify_svn(None, [], 'mkdir', '-m', 'log_msg', C_url)
r2 = fsfs_file(sbox.repo_dir, 'revs', '2')
fp = open(r2, 'r+b')
fp.write(b'inserting junk to corrupt the rev')
fp.close()
(exit_code, output, errput) = svntest.main.run_svnadmin('verify', '--keep-going', '--quiet', sbox.repo_dir)
exp_err = svntest.verify.RegexListOutput(['.*Error verifying revision 2.', 'svnadmin: E160004:.*', 'svnadmin: E160004:.*', '.*Error verifying revision 3.', 'svnadmin: E160004:.*', 'svnadmin: E160004:.*', 'svnadmin: E205012:.*'], False)
if svntest.main.is_fs_log_addressing():
exp_err.insert(0, '.*Error verifying repository metadata.')
exp_err.insert(1, 'svnadmin: E160004:.*')
if svntest.verify.verify_outputs("Unexpected error while running 'svnadmin verify'.", output, errput, None, exp_err):
raise svntest.Failure
svntest.main.safe_rmtree(sbox.repo_dir, True) |
@SkipUnless(svntest.main.is_fs_type_fsfs)
def verify_invalid_path_changes(sbox):
'detect invalid changed path list entries'
if svntest.main.options.fsfs_packing:
raise svntest.Skip('fsfs packing set')
sbox.build(create_wc=False)
repo_url = sbox.repo_url
for r in range(2, 20):
svntest.actions.run_and_verify_svn(None, [], 'mkdir', '-m', 'log_msg', ((sbox.repo_url + '/B') + str(r)))
set_changed_path_list(sbox, 2, b'_0.0.t1-1 add-dir false false /A\n\n')
set_changed_path_list(sbox, 4, b'_0.0.t3-2 add-dir false false /C/X\n\n')
set_changed_path_list(sbox, 6, b'_0.0.t5-2 delete-dir false false /C\n\n')
set_changed_path_list(sbox, 8, b'_0.0.t7-2 delete-file false false /B3\n\n')
set_changed_path_list(sbox, 10, b'_0.0.t9-2 add-dir false false /B10\n6 /B8\n')
set_changed_path_list(sbox, 12, b'_0.0.t11-2 add-file false false /B12\n9 /B8\n')
set_changed_path_list(sbox, 14, b'_0.0.t13-2 modify-file false false /A/D/H/foo\n\n')
set_changed_path_list(sbox, 16, b'_0.0.t15-2 modify-file false false /B12\n\n')
set_changed_path_list(sbox, 18, b'_0.0.t17-2 replace-file false false /A/D/H/foo\n\n')
(exit_code, output, errput) = svntest.main.run_svnadmin('verify', '--keep-going', sbox.repo_dir)
exp_out1 = svntest.verify.RegexListOutput(['.*Verified revision 0.', '.*Verified revision 1.', '.*Verified revision 3.', '.*Verified revision 5.', '.*Verified revision 7.', '.*Verified revision 8.', '.*Verified revision 9.', '.*Verified revision 11.', '.*Verified revision 13.', '.*Verified revision 15.', '.*Verified revision 17.', '.*Verified revision 19.', '.*', '.*Summary.*', '.*r2: E160020:.*', '.*r2: E160020:.*', '.*r4: E160013:.*', '.*r6: E160013:.*', '.*r6: E160013:.*', '.*r10: E160013:.*', '.*r10: E160013:.*', '.*r12: E145001:.*', '.*r12: E145001:.*', '.*r14: E160013:.*', '.*r14: E160013:.*', '.*r16: E145001:.*', '.*r16: E145001:.*', '.*r18: E160013:.*', '.*r18: E160013:.*'])
exp_err1 = svntest.verify.RegexListOutput(['.*Error verifying revision 2.', 'svnadmin: E160020:.*', 'svnadmin: E160020:.*', '.*Error verifying revision 4.', 'svnadmin: E160013:.*', '.*Error verifying revision 6.', 'svnadmin: E160013:.*', 'svnadmin: E160013:.*', '.*Error verifying revision 10.', 'svnadmin: E160013:.*', 'svnadmin: E160013:.*', '.*Error verifying revision 12.', 'svnadmin: E145001:.*', 'svnadmin: E145001:.*', '.*Error verifying revision 14.', 'svnadmin: E160013:.*', 'svnadmin: E160013:.*', '.*Error verifying revision 16.', 'svnadmin: E145001:.*', 'svnadmin: E145001:.*', '.*Error verifying revision 18.', 'svnadmin: E160013:.*', 'svnadmin: E160013:.*', 'svnadmin: E205012:.*'], False)
exp_out2 = svntest.verify.RegexListOutput(['.*Verified revision 0.', '.*Verified revision 1.', '.*Verified revision 3.', '.*Verified revision 5.', '.*Verified revision 7.', '.*Verified revision 8.', '.*Verified revision 9.', '.*Verified revision 11.', '.*Verified revision 13.', '.*Verified revision 15.', '.*Verified revision 17.', '.*Verified revision 19.', '.*', '.*Summary.*', '.*r2: E160020:.*', '.*r2: E160020:.*', '.*r4: E160013:.*', '.*r6: E160013:.*', '.*r10: E160013:.*', '.*r10: E160013:.*', '.*r12: E145001:.*', '.*r12: E145001:.*', '.*r14: E160013:.*', '.*r16: E145001:.*', '.*r16: E145001:.*', '.*r18: E160013:.*'])
exp_err2 = svntest.verify.RegexListOutput(['.*Error verifying revision 2.', 'svnadmin: E160020:.*', 'svnadmin: E160020:.*', '.*Error verifying revision 4.', 'svnadmin: E160013:.*', '.*Error verifying revision 6.', 'svnadmin: E160013:.*', '.*Error verifying revision 10.', 'svnadmin: E160013:.*', 'svnadmin: E160013:.*', '.*Error verifying revision 12.', 'svnadmin: E145001:.*', 'svnadmin: E145001:.*', '.*Error verifying revision 14.', 'svnadmin: E160013:.*', '.*Error verifying revision 16.', 'svnadmin: E145001:.*', 'svnadmin: E145001:.*', '.*Error verifying revision 18.', 'svnadmin: E160013:.*', 'svnadmin: E205012:.*'], False)
try:
rev6_line = errput.index('* Error verifying revision 6.\n')
rev10_line = errput.index('* Error verifying revision 10.\n')
error_count = 0
for line in errput[(rev6_line + 1):rev10_line]:
if ('svnadmin: E' in line):
error_count = (error_count + 1)
if (error_count == 1):
exp_out = exp_out2
exp_err = exp_err2
else:
exp_out = exp_out1
exp_err = exp_err1
except ValueError:
exp_out = exp_out1
exp_err = exp_err1
if svntest.main.fs_has_rep_sharing():
exp_out.insert(0, '.*Verifying.*metadata.*')
if (svntest.main.options.fsfs_sharding is not None):
for x in range(0, (19 // svntest.main.options.fsfs_sharding)):
exp_out.insert(0, '.*Verifying.*metadata.*')
if svntest.main.is_fs_log_addressing():
exp_out.insert(0, '.*Verifying.*metadata.*')
if svntest.verify.verify_outputs("Unexpected error while running 'svnadmin verify'.", output, errput, exp_out, exp_err):
raise svntest.Failure
(exit_code, output, errput) = svntest.main.run_svnadmin('verify', sbox.repo_dir)
exp_out = svntest.verify.RegexListOutput(['.*Verified revision 0.', '.*Verified revision 1.'])
exp_err = svntest.verify.RegexListOutput(['.*Error verifying revision 2.', 'svnadmin: E160020:.*', 'svnadmin: E160020:.*'], False)
if svntest.main.fs_has_rep_sharing():
exp_out.insert(0, '.*Verifying.*metadata.*')
if (svntest.main.options.fsfs_sharding is not None):
for x in range(0, (19 // svntest.main.options.fsfs_sharding)):
exp_out.insert(0, '.*Verifying.*metadata.*')
if svntest.main.is_fs_log_addressing():
exp_out.insert(0, '.*Verifying.*metadata.*')
if svntest.verify.verify_outputs("Unexpected error while running 'svnadmin verify'.", output, errput, exp_out, exp_err):
raise svntest.Failure
(exit_code, output, errput) = svntest.main.run_svnadmin('verify', '--quiet', sbox.repo_dir)
exp_out = []
exp_err = svntest.verify.RegexListOutput(['.*Error verifying revision 2.', 'svnadmin: E160020:.*', 'svnadmin: E160020:.*'], False)
if svntest.verify.verify_outputs("Output of 'svnadmin verify' is unexpected.", output, errput, exp_out, exp_err):
raise svntest.Failure
svntest.main.safe_rmtree(sbox.repo_dir, True) | 5,820,727,438,573,355,000 | detect invalid changed path list entries | subversion/tests/cmdline/svnadmin_tests.py | verify_invalid_path_changes | auycro/subversion | python | @SkipUnless(svntest.main.is_fs_type_fsfs)
def verify_invalid_path_changes(sbox):
if svntest.main.options.fsfs_packing:
raise svntest.Skip('fsfs packing set')
sbox.build(create_wc=False)
repo_url = sbox.repo_url
for r in range(2, 20):
svntest.actions.run_and_verify_svn(None, [], 'mkdir', '-m', 'log_msg', ((sbox.repo_url + '/B') + str(r)))
set_changed_path_list(sbox, 2, b'_0.0.t1-1 add-dir false false /A\n\n')
set_changed_path_list(sbox, 4, b'_0.0.t3-2 add-dir false false /C/X\n\n')
set_changed_path_list(sbox, 6, b'_0.0.t5-2 delete-dir false false /C\n\n')
set_changed_path_list(sbox, 8, b'_0.0.t7-2 delete-file false false /B3\n\n')
set_changed_path_list(sbox, 10, b'_0.0.t9-2 add-dir false false /B10\n6 /B8\n')
set_changed_path_list(sbox, 12, b'_0.0.t11-2 add-file false false /B12\n9 /B8\n')
set_changed_path_list(sbox, 14, b'_0.0.t13-2 modify-file false false /A/D/H/foo\n\n')
set_changed_path_list(sbox, 16, b'_0.0.t15-2 modify-file false false /B12\n\n')
set_changed_path_list(sbox, 18, b'_0.0.t17-2 replace-file false false /A/D/H/foo\n\n')
(exit_code, output, errput) = svntest.main.run_svnadmin('verify', '--keep-going', sbox.repo_dir)
exp_out1 = svntest.verify.RegexListOutput(['.*Verified revision 0.', '.*Verified revision 1.', '.*Verified revision 3.', '.*Verified revision 5.', '.*Verified revision 7.', '.*Verified revision 8.', '.*Verified revision 9.', '.*Verified revision 11.', '.*Verified revision 13.', '.*Verified revision 15.', '.*Verified revision 17.', '.*Verified revision 19.', '.*', '.*Summary.*', '.*r2: E160020:.*', '.*r2: E160020:.*', '.*r4: E160013:.*', '.*r6: E160013:.*', '.*r6: E160013:.*', '.*r10: E160013:.*', '.*r10: E160013:.*', '.*r12: E145001:.*', '.*r12: E145001:.*', '.*r14: E160013:.*', '.*r14: E160013:.*', '.*r16: E145001:.*', '.*r16: E145001:.*', '.*r18: E160013:.*', '.*r18: E160013:.*'])
exp_err1 = svntest.verify.RegexListOutput(['.*Error verifying revision 2.', 'svnadmin: E160020:.*', 'svnadmin: E160020:.*', '.*Error verifying revision 4.', 'svnadmin: E160013:.*', '.*Error verifying revision 6.', 'svnadmin: E160013:.*', 'svnadmin: E160013:.*', '.*Error verifying revision 10.', 'svnadmin: E160013:.*', 'svnadmin: E160013:.*', '.*Error verifying revision 12.', 'svnadmin: E145001:.*', 'svnadmin: E145001:.*', '.*Error verifying revision 14.', 'svnadmin: E160013:.*', 'svnadmin: E160013:.*', '.*Error verifying revision 16.', 'svnadmin: E145001:.*', 'svnadmin: E145001:.*', '.*Error verifying revision 18.', 'svnadmin: E160013:.*', 'svnadmin: E160013:.*', 'svnadmin: E205012:.*'], False)
exp_out2 = svntest.verify.RegexListOutput(['.*Verified revision 0.', '.*Verified revision 1.', '.*Verified revision 3.', '.*Verified revision 5.', '.*Verified revision 7.', '.*Verified revision 8.', '.*Verified revision 9.', '.*Verified revision 11.', '.*Verified revision 13.', '.*Verified revision 15.', '.*Verified revision 17.', '.*Verified revision 19.', '.*', '.*Summary.*', '.*r2: E160020:.*', '.*r2: E160020:.*', '.*r4: E160013:.*', '.*r6: E160013:.*', '.*r10: E160013:.*', '.*r10: E160013:.*', '.*r12: E145001:.*', '.*r12: E145001:.*', '.*r14: E160013:.*', '.*r16: E145001:.*', '.*r16: E145001:.*', '.*r18: E160013:.*'])
exp_err2 = svntest.verify.RegexListOutput(['.*Error verifying revision 2.', 'svnadmin: E160020:.*', 'svnadmin: E160020:.*', '.*Error verifying revision 4.', 'svnadmin: E160013:.*', '.*Error verifying revision 6.', 'svnadmin: E160013:.*', '.*Error verifying revision 10.', 'svnadmin: E160013:.*', 'svnadmin: E160013:.*', '.*Error verifying revision 12.', 'svnadmin: E145001:.*', 'svnadmin: E145001:.*', '.*Error verifying revision 14.', 'svnadmin: E160013:.*', '.*Error verifying revision 16.', 'svnadmin: E145001:.*', 'svnadmin: E145001:.*', '.*Error verifying revision 18.', 'svnadmin: E160013:.*', 'svnadmin: E205012:.*'], False)
try:
rev6_line = errput.index('* Error verifying revision 6.\n')
rev10_line = errput.index('* Error verifying revision 10.\n')
error_count = 0
for line in errput[(rev6_line + 1):rev10_line]:
if ('svnadmin: E' in line):
error_count = (error_count + 1)
if (error_count == 1):
exp_out = exp_out2
exp_err = exp_err2
else:
exp_out = exp_out1
exp_err = exp_err1
except ValueError:
exp_out = exp_out1
exp_err = exp_err1
if svntest.main.fs_has_rep_sharing():
exp_out.insert(0, '.*Verifying.*metadata.*')
if (svntest.main.options.fsfs_sharding is not None):
for x in range(0, (19 // svntest.main.options.fsfs_sharding)):
exp_out.insert(0, '.*Verifying.*metadata.*')
if svntest.main.is_fs_log_addressing():
exp_out.insert(0, '.*Verifying.*metadata.*')
if svntest.verify.verify_outputs("Unexpected error while running 'svnadmin verify'.", output, errput, exp_out, exp_err):
raise svntest.Failure
(exit_code, output, errput) = svntest.main.run_svnadmin('verify', sbox.repo_dir)
exp_out = svntest.verify.RegexListOutput(['.*Verified revision 0.', '.*Verified revision 1.'])
exp_err = svntest.verify.RegexListOutput(['.*Error verifying revision 2.', 'svnadmin: E160020:.*', 'svnadmin: E160020:.*'], False)
if svntest.main.fs_has_rep_sharing():
exp_out.insert(0, '.*Verifying.*metadata.*')
if (svntest.main.options.fsfs_sharding is not None):
for x in range(0, (19 // svntest.main.options.fsfs_sharding)):
exp_out.insert(0, '.*Verifying.*metadata.*')
if svntest.main.is_fs_log_addressing():
exp_out.insert(0, '.*Verifying.*metadata.*')
if svntest.verify.verify_outputs("Unexpected error while running 'svnadmin verify'.", output, errput, exp_out, exp_err):
raise svntest.Failure
(exit_code, output, errput) = svntest.main.run_svnadmin('verify', '--quiet', sbox.repo_dir)
exp_out = []
exp_err = svntest.verify.RegexListOutput(['.*Error verifying revision 2.', 'svnadmin: E160020:.*', 'svnadmin: E160020:.*'], False)
if svntest.verify.verify_outputs("Output of 'svnadmin verify' is unexpected.", output, errput, exp_out, exp_err):
raise svntest.Failure
svntest.main.safe_rmtree(sbox.repo_dir, True) |
def verify_denormalized_names(sbox):
'detect denormalized names and name collisions'
sbox.build(create_wc=False, empty=True)
dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]), 'svnadmin_tests_data', 'normalization_check.dump')
load_dumpstream(sbox, svntest.actions.load_dumpfile(dumpfile_location))
(exit_code, output, errput) = svntest.main.run_svnadmin('verify', '--check-normalization', sbox.repo_dir)
expected_output_regex_list = ['.*Verified revision 0.', '.*Verified revision 1.', '.*Verified revision 2.', '.*Verified revision 3.', "WARNING 0x0003: Duplicate representation of path 'A/.*/.*lpha'", '.*Verified revision 4.', '.*Verified revision 5.', "WARNING 0x0004: Duplicate representation of path '/Q/.*lpha' in svn:mergeinfo property of 'A/.*'", '.*Verified revision 6.', '.*Verified revision 7.']
if (svntest.main.fs_has_rep_sharing() and (not svntest.main.is_fs_type_bdb())):
expected_output_regex_list.insert(0, '.*Verifying repository metadata.*')
if (svntest.main.options.fsfs_sharding is not None):
for x in range(0, (7 // svntest.main.options.fsfs_sharding)):
expected_output_regex_list.insert(0, '.*Verifying.*metadata.*')
if svntest.main.is_fs_log_addressing():
expected_output_regex_list.insert(0, '.* Verifying metadata at revision 0.*')
exp_out = svntest.verify.RegexListOutput(expected_output_regex_list)
exp_err = svntest.verify.ExpectedOutput([])
svntest.verify.verify_outputs("Unexpected error while running 'svnadmin verify'.", output, errput, exp_out, exp_err) | 8,407,033,995,025,627,000 | detect denormalized names and name collisions | subversion/tests/cmdline/svnadmin_tests.py | verify_denormalized_names | auycro/subversion | python | def verify_denormalized_names(sbox):
sbox.build(create_wc=False, empty=True)
dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]), 'svnadmin_tests_data', 'normalization_check.dump')
load_dumpstream(sbox, svntest.actions.load_dumpfile(dumpfile_location))
(exit_code, output, errput) = svntest.main.run_svnadmin('verify', '--check-normalization', sbox.repo_dir)
expected_output_regex_list = ['.*Verified revision 0.', '.*Verified revision 1.', '.*Verified revision 2.', '.*Verified revision 3.', "WARNING 0x0003: Duplicate representation of path 'A/.*/.*lpha'", '.*Verified revision 4.', '.*Verified revision 5.', "WARNING 0x0004: Duplicate representation of path '/Q/.*lpha' in svn:mergeinfo property of 'A/.*'", '.*Verified revision 6.', '.*Verified revision 7.']
if (svntest.main.fs_has_rep_sharing() and (not svntest.main.is_fs_type_bdb())):
expected_output_regex_list.insert(0, '.*Verifying repository metadata.*')
if (svntest.main.options.fsfs_sharding is not None):
for x in range(0, (7 / svntest.main.options.fsfs_sharding)):
expected_output_regex_list.insert(0, '.*Verifying.*metadata.*')
if svntest.main.is_fs_log_addressing():
expected_output_regex_list.insert(0, '.* Verifying metadata at revision 0.*')
exp_out = svntest.verify.RegexListOutput(expected_output_regex_list)
exp_err = svntest.verify.ExpectedOutput([])
svntest.verify.verify_outputs("Unexpected error while running 'svnadmin verify'.", output, errput, exp_out, exp_err) |
@SkipUnless(svntest.main.is_fs_type_fsfs)
def fsfs_recover_old_non_empty(sbox):
'fsfs recover non-empty --compatible-version=1.3'
sbox.build(create_wc=False, minor_version=3)
svntest.actions.run_and_verify_svnadmin(None, [], 'recover', sbox.repo_dir) | 544,593,585,622,874,500 | fsfs recover non-empty --compatible-version=1.3 | subversion/tests/cmdline/svnadmin_tests.py | fsfs_recover_old_non_empty | auycro/subversion | python | @SkipUnless(svntest.main.is_fs_type_fsfs)
def fsfs_recover_old_non_empty(sbox):
sbox.build(create_wc=False, minor_version=3)
svntest.actions.run_and_verify_svnadmin(None, [], 'recover', sbox.repo_dir) |
@SkipUnless(svntest.main.is_fs_type_fsfs)
def fsfs_hotcopy_old_non_empty(sbox):
'fsfs hotcopy non-empty --compatible-version=1.3'
sbox.build(create_wc=False, minor_version=3)
(backup_dir, backup_url) = sbox.add_repo_path('backup')
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', sbox.repo_dir, backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir) | -2,298,455,371,426,589,700 | fsfs hotcopy non-empty --compatible-version=1.3 | subversion/tests/cmdline/svnadmin_tests.py | fsfs_hotcopy_old_non_empty | auycro/subversion | python | @SkipUnless(svntest.main.is_fs_type_fsfs)
def fsfs_hotcopy_old_non_empty(sbox):
sbox.build(create_wc=False, minor_version=3)
(backup_dir, backup_url) = sbox.add_repo_path('backup')
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', sbox.repo_dir, backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir) |
def load_ignore_dates(sbox):
'svnadmin load --ignore-dates'
start_time = time.localtime()
time.sleep(1)
sbox.build(create_wc=False, empty=True)
dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]), 'svnadmin_tests_data', 'skeleton_repos.dump')
dumpfile_skeleton = svntest.actions.load_dumpfile(dumpfile_location)
load_dumpstream(sbox, dumpfile_skeleton, '--ignore-dates')
svntest.actions.run_and_verify_svnlook(['6\n'], None, 'youngest', sbox.repo_dir)
for rev in range(1, 6):
(exit_code, output, errput) = svntest.main.run_svnlook('date', '-r', rev, sbox.repo_dir)
if errput:
raise SVNUnexpectedStderr(errput)
rev_time = time.strptime(output[0].rstrip()[:19], '%Y-%m-%d %H:%M:%S')
if (rev_time < start_time):
raise svntest.Failure(('Revision time for r%d older than load start time\n rev_time: %s\n start_time: %s' % (rev, str(rev_time), str(start_time)))) | 7,302,272,163,203,743,000 | svnadmin load --ignore-dates | subversion/tests/cmdline/svnadmin_tests.py | load_ignore_dates | auycro/subversion | python | def load_ignore_dates(sbox):
start_time = time.localtime()
time.sleep(1)
sbox.build(create_wc=False, empty=True)
dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]), 'svnadmin_tests_data', 'skeleton_repos.dump')
dumpfile_skeleton = svntest.actions.load_dumpfile(dumpfile_location)
load_dumpstream(sbox, dumpfile_skeleton, '--ignore-dates')
svntest.actions.run_and_verify_svnlook(['6\n'], None, 'youngest', sbox.repo_dir)
for rev in range(1, 6):
(exit_code, output, errput) = svntest.main.run_svnlook('date', '-r', rev, sbox.repo_dir)
if errput:
raise SVNUnexpectedStderr(errput)
rev_time = time.strptime(output[0].rstrip()[:19], '%Y-%m-%d %H:%M:%S')
if (rev_time < start_time):
raise svntest.Failure(('Revision time for r%d older than load start time\n rev_time: %s\n start_time: %s' % (rev, str(rev_time), str(start_time)))) |
@SkipUnless(svntest.main.is_fs_type_fsfs)
def fsfs_hotcopy_old_with_id_changes(sbox):
'fsfs hotcopy old with node-id and copy-id changes'
sbox.build(create_wc=True, minor_version=3)
(inc_backup_dir, inc_backup_url) = sbox.add_repo_path('incremental-backup')
(backup_dir, backup_url) = sbox.add_repo_path('backup-after-r1')
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, inc_backup_dir)
sbox.simple_propset('foo', 'bar', 'A/mu')
sbox.simple_commit(message='r2')
(backup_dir, backup_url) = sbox.add_repo_path('backup-after-r2')
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, inc_backup_dir)
sbox.simple_copy('A/B/E', 'A/B/E1')
sbox.simple_commit(message='r3')
(backup_dir, backup_url) = sbox.add_repo_path('backup-after-r3')
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, inc_backup_dir)
sbox.simple_rm('A/D/gamma')
sbox.simple_commit(message='r4')
(backup_dir, backup_url) = sbox.add_repo_path('backup-after-r4')
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, inc_backup_dir)
sbox.simple_add_text('This is the replaced file.\n', 'A/D/gamma')
sbox.simple_commit(message='r5')
(backup_dir, backup_url) = sbox.add_repo_path('backup-after-r5')
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, inc_backup_dir)
sbox.simple_add_text('This is an entirely new file.\n', 'A/C/mu1')
sbox.simple_commit(message='r6')
(backup_dir, backup_url) = sbox.add_repo_path('backup-after-r6')
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, inc_backup_dir)
sbox.simple_append('A/mu', 'This is change in the existing file.\n')
sbox.simple_commit(message='r7')
(backup_dir, backup_url) = sbox.add_repo_path('backup-after-r7')
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, inc_backup_dir) | -3,939,098,385,355,646,000 | fsfs hotcopy old with node-id and copy-id changes | subversion/tests/cmdline/svnadmin_tests.py | fsfs_hotcopy_old_with_id_changes | auycro/subversion | python | @SkipUnless(svntest.main.is_fs_type_fsfs)
def fsfs_hotcopy_old_with_id_changes(sbox):
sbox.build(create_wc=True, minor_version=3)
(inc_backup_dir, inc_backup_url) = sbox.add_repo_path('incremental-backup')
(backup_dir, backup_url) = sbox.add_repo_path('backup-after-r1')
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, inc_backup_dir)
sbox.simple_propset('foo', 'bar', 'A/mu')
sbox.simple_commit(message='r2')
(backup_dir, backup_url) = sbox.add_repo_path('backup-after-r2')
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, inc_backup_dir)
sbox.simple_copy('A/B/E', 'A/B/E1')
sbox.simple_commit(message='r3')
(backup_dir, backup_url) = sbox.add_repo_path('backup-after-r3')
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, inc_backup_dir)
sbox.simple_rm('A/D/gamma')
sbox.simple_commit(message='r4')
(backup_dir, backup_url) = sbox.add_repo_path('backup-after-r4')
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, inc_backup_dir)
sbox.simple_add_text('This is the replaced file.\n', 'A/D/gamma')
sbox.simple_commit(message='r5')
(backup_dir, backup_url) = sbox.add_repo_path('backup-after-r5')
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, inc_backup_dir)
sbox.simple_add_text('This is an entirely new file.\n', 'A/C/mu1')
sbox.simple_commit(message='r6')
(backup_dir, backup_url) = sbox.add_repo_path('backup-after-r6')
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, inc_backup_dir)
sbox.simple_append('A/mu', 'This is change in the existing file.\n')
sbox.simple_commit(message='r7')
(backup_dir, backup_url) = sbox.add_repo_path('backup-after-r7')
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
check_hotcopy_fsfs(sbox.repo_dir, inc_backup_dir) |
@SkipUnless(svntest.main.fs_has_pack)
def verify_packed(sbox):
'verify packed with small shards'
sbox.build()
patch_format(sbox.repo_dir, shard_size=2)
sbox.simple_append('iota', 'Line.\n')
sbox.simple_append('A/D/gamma', 'Another line.\n')
sbox.simple_commit(message='r2')
sbox.simple_propset('foo', 'bar', 'iota')
sbox.simple_propset('foo', 'baz', 'A/mu')
sbox.simple_commit(message='r3')
sbox.simple_rm('A/C')
sbox.simple_copy('A/B/E', 'A/B/E1')
sbox.simple_move('A/mu', 'A/B/mu')
sbox.simple_commit(message='r4')
sbox.simple_propdel('foo', 'A/B/mu')
sbox.simple_commit(message='r5')
if (svntest.main.is_fs_type_fsfs and svntest.main.options.fsfs_packing):
pass
else:
expected_output = ['Packing revisions in shard 0...done.\n', 'Packing revisions in shard 1...done.\n', 'Packing revisions in shard 2...done.\n']
svntest.actions.run_and_verify_svnadmin(expected_output, [], 'pack', sbox.repo_dir)
if svntest.main.is_fs_log_addressing():
expected_output = ['* Verifying metadata at revision 0 ...\n', '* Verifying metadata at revision 2 ...\n', '* Verifying metadata at revision 4 ...\n', '* Verifying repository metadata ...\n', '* Verified revision 0.\n', '* Verified revision 1.\n', '* Verified revision 2.\n', '* Verified revision 3.\n', '* Verified revision 4.\n', '* Verified revision 5.\n']
else:
expected_output = ['* Verifying repository metadata ...\n', '* Verified revision 0.\n', '* Verified revision 1.\n', '* Verified revision 2.\n', '* Verified revision 3.\n', '* Verified revision 4.\n', '* Verified revision 5.\n']
svntest.actions.run_and_verify_svnadmin(expected_output, [], 'verify', sbox.repo_dir) | -3,766,254,840,301,204,000 | verify packed with small shards | subversion/tests/cmdline/svnadmin_tests.py | verify_packed | auycro/subversion | python | @SkipUnless(svntest.main.fs_has_pack)
def verify_packed(sbox):
sbox.build()
patch_format(sbox.repo_dir, shard_size=2)
sbox.simple_append('iota', 'Line.\n')
sbox.simple_append('A/D/gamma', 'Another line.\n')
sbox.simple_commit(message='r2')
sbox.simple_propset('foo', 'bar', 'iota')
sbox.simple_propset('foo', 'baz', 'A/mu')
sbox.simple_commit(message='r3')
sbox.simple_rm('A/C')
sbox.simple_copy('A/B/E', 'A/B/E1')
sbox.simple_move('A/mu', 'A/B/mu')
sbox.simple_commit(message='r4')
sbox.simple_propdel('foo', 'A/B/mu')
sbox.simple_commit(message='r5')
if (svntest.main.is_fs_type_fsfs and svntest.main.options.fsfs_packing):
pass
else:
expected_output = ['Packing revisions in shard 0...done.\n', 'Packing revisions in shard 1...done.\n', 'Packing revisions in shard 2...done.\n']
svntest.actions.run_and_verify_svnadmin(expected_output, [], 'pack', sbox.repo_dir)
if svntest.main.is_fs_log_addressing():
expected_output = ['* Verifying metadata at revision 0 ...\n', '* Verifying metadata at revision 2 ...\n', '* Verifying metadata at revision 4 ...\n', '* Verifying repository metadata ...\n', '* Verified revision 0.\n', '* Verified revision 1.\n', '* Verified revision 2.\n', '* Verified revision 3.\n', '* Verified revision 4.\n', '* Verified revision 5.\n']
else:
expected_output = ['* Verifying repository metadata ...\n', '* Verified revision 0.\n', '* Verified revision 1.\n', '* Verified revision 2.\n', '* Verified revision 3.\n', '* Verified revision 4.\n', '* Verified revision 5.\n']
svntest.actions.run_and_verify_svnadmin(expected_output, [], 'verify', sbox.repo_dir) |
def freeze_freeze(sbox):
'svnadmin freeze svnadmin freeze (some-cmd)'
sbox.build(create_wc=False, read_only=True)
(second_repo_dir, _) = sbox.add_repo_path('backup')
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', sbox.repo_dir, second_repo_dir)
if (svntest.main.is_fs_type_fsx() or (svntest.main.is_fs_type_fsfs() and (svntest.main.options.server_minor_version < 9))):
svntest.actions.run_and_verify_svnadmin([], None, 'setuuid', second_repo_dir)
svntest.actions.run_and_verify_svnadmin(None, [], 'freeze', '--', sbox.repo_dir, svntest.main.svnadmin_binary, 'freeze', '--', second_repo_dir, sys.executable, '-c', 'True')
arg_file = sbox.get_tempname()
svntest.main.file_write(arg_file, ('%s\n%s\n' % (sbox.repo_dir, second_repo_dir)))
svntest.actions.run_and_verify_svnadmin(None, [], 'freeze', '-F', arg_file, '--', sys.executable, '-c', 'True') | -2,789,566,124,145,146,000 | svnadmin freeze svnadmin freeze (some-cmd) | subversion/tests/cmdline/svnadmin_tests.py | freeze_freeze | auycro/subversion | python | def freeze_freeze(sbox):
sbox.build(create_wc=False, read_only=True)
(second_repo_dir, _) = sbox.add_repo_path('backup')
svntest.actions.run_and_verify_svnadmin(None, [], 'hotcopy', sbox.repo_dir, second_repo_dir)
if (svntest.main.is_fs_type_fsx() or (svntest.main.is_fs_type_fsfs() and (svntest.main.options.server_minor_version < 9))):
svntest.actions.run_and_verify_svnadmin([], None, 'setuuid', second_repo_dir)
svntest.actions.run_and_verify_svnadmin(None, [], 'freeze', '--', sbox.repo_dir, svntest.main.svnadmin_binary, 'freeze', '--', second_repo_dir, sys.executable, '-c', 'True')
arg_file = sbox.get_tempname()
svntest.main.file_write(arg_file, ('%s\n%s\n' % (sbox.repo_dir, second_repo_dir)))
svntest.actions.run_and_verify_svnadmin(None, [], 'freeze', '-F', arg_file, '--', sys.executable, '-c', 'True') |
def verify_metadata_only(sbox):
'verify metadata only'
sbox.build(create_wc=False)
(exit_code, output, errput) = svntest.main.run_svnadmin('verify', sbox.repo_dir, '--metadata-only')
if errput:
raise SVNUnexpectedStderr(errput)
if svntest.main.is_fs_log_addressing():
svntest.verify.compare_and_display_lines("Unexpected error while running 'svnadmin verify'.", 'STDOUT', ['* Verifying metadata at revision 0 ...\n', '* Verifying repository metadata ...\n'], output)
elif (svntest.main.fs_has_rep_sharing() and (not svntest.main.is_fs_type_bdb())):
svntest.verify.compare_and_display_lines("Unexpected error while running 'svnadmin verify'.", 'STDOUT', ['* Verifying repository metadata ...\n'], output)
else:
svntest.verify.compare_and_display_lines("Unexpected error while running 'svnadmin verify'.", 'STDOUT', [], output) | 7,340,750,279,070,634,000 | verify metadata only | subversion/tests/cmdline/svnadmin_tests.py | verify_metadata_only | auycro/subversion | python | def verify_metadata_only(sbox):
sbox.build(create_wc=False)
(exit_code, output, errput) = svntest.main.run_svnadmin('verify', sbox.repo_dir, '--metadata-only')
if errput:
raise SVNUnexpectedStderr(errput)
if svntest.main.is_fs_log_addressing():
svntest.verify.compare_and_display_lines("Unexpected error while running 'svnadmin verify'.", 'STDOUT', ['* Verifying metadata at revision 0 ...\n', '* Verifying repository metadata ...\n'], output)
elif (svntest.main.fs_has_rep_sharing() and (not svntest.main.is_fs_type_bdb())):
svntest.verify.compare_and_display_lines("Unexpected error while running 'svnadmin verify'.", 'STDOUT', ['* Verifying repository metadata ...\n'], output)
else:
svntest.verify.compare_and_display_lines("Unexpected error while running 'svnadmin verify'.", 'STDOUT', [], output) |
@Skip(svntest.main.is_fs_type_bdb)
def verify_quickly(sbox):
'verify quickly using metadata'
sbox.build(create_wc=False)
rev_file = open(fsfs_file(sbox.repo_dir, 'revs', '1'), 'r+b')
rev_file.seek(8)
rev_file.write(b'#')
rev_file.close()
(exit_code, output, errput) = svntest.main.run_svnadmin('verify', sbox.repo_dir, '--metadata-only')
if svntest.main.is_fs_log_addressing():
exp_out = svntest.verify.RegexListOutput([])
exp_err = svntest.verify.RegexListOutput(['svnadmin: E160004:.*'], False)
else:
exp_out = svntest.verify.RegexListOutput([])
exp_err = svntest.verify.RegexListOutput([])
if svntest.main.fs_has_rep_sharing():
exp_out.insert(0, '.*Verifying.*metadata.*')
if svntest.verify.verify_outputs("Unexpected error while running 'svnadmin verify'.", output, errput, exp_out, exp_err):
raise svntest.Failure
svntest.main.safe_rmtree(sbox.repo_dir, True) | -3,595,489,181,338,386,000 | verify quickly using metadata | subversion/tests/cmdline/svnadmin_tests.py | verify_quickly | auycro/subversion | python | @Skip(svntest.main.is_fs_type_bdb)
def verify_quickly(sbox):
sbox.build(create_wc=False)
rev_file = open(fsfs_file(sbox.repo_dir, 'revs', '1'), 'r+b')
rev_file.seek(8)
rev_file.write(b'#')
rev_file.close()
(exit_code, output, errput) = svntest.main.run_svnadmin('verify', sbox.repo_dir, '--metadata-only')
if svntest.main.is_fs_log_addressing():
exp_out = svntest.verify.RegexListOutput([])
exp_err = svntest.verify.RegexListOutput(['svnadmin: E160004:.*'], False)
else:
exp_out = svntest.verify.RegexListOutput([])
exp_err = svntest.verify.RegexListOutput([])
if svntest.main.fs_has_rep_sharing():
exp_out.insert(0, '.*Verifying.*metadata.*')
if svntest.verify.verify_outputs("Unexpected error while running 'svnadmin verify'.", output, errput, exp_out, exp_err):
raise svntest.Failure
svntest.main.safe_rmtree(sbox.repo_dir, True) |
@SkipUnless(svntest.main.is_fs_type_fsfs)
@SkipUnless(svntest.main.fs_has_pack)
def fsfs_hotcopy_progress(sbox):
'hotcopy progress reporting'
if svntest.main.options.fsfs_packing:
raise svntest.Skip('fsfs packing set')
sbox.build(create_wc=False, empty=True)
patch_format(sbox.repo_dir, shard_size=3)
(inc_backup_dir, inc_backup_url) = sbox.add_repo_path('incremental-backup')
expected_full = ['* Copied revision 0.\n']
expected_incremental = ['* Copied revision 0.\n']
(backup_dir, backup_url) = sbox.add_repo_path('backup-0')
svntest.actions.run_and_verify_svnadmin(expected_full, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(expected_incremental, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir)
for i in range(3):
svntest.actions.run_and_verify_svn(None, [], 'mkdir', '-m', svntest.main.make_log_msg(), (sbox.repo_url + ('/dir-%i' % i)))
expected_full = ['* Copied revision 0.\n', '* Copied revision 1.\n', '* Copied revision 2.\n', '* Copied revision 3.\n']
expected_incremental = ['* Copied revision 1.\n', '* Copied revision 2.\n', '* Copied revision 3.\n']
(backup_dir, backup_url) = sbox.add_repo_path('backup-1')
svntest.actions.run_and_verify_svnadmin(expected_full, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(expected_incremental, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir)
svntest.actions.run_and_verify_svnadmin(None, [], 'pack', sbox.repo_dir)
expected_full = ['* Copied revisions from 0 to 2.\n', '* Copied revision 3.\n']
expected_incremental = ['* Copied revisions from 0 to 2.\n']
(backup_dir, backup_url) = sbox.add_repo_path('backup-2')
svntest.actions.run_and_verify_svnadmin(expected_full, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(expected_incremental, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir)
for i in range(4, 6):
svntest.actions.run_and_verify_svn(None, [], 'mkdir', '-m', svntest.main.make_log_msg(), (sbox.repo_url + ('/dir-%i' % i)))
svntest.actions.run_and_verify_svnadmin(None, [], 'pack', sbox.repo_dir)
for i in range(6, 8):
svntest.actions.run_and_verify_svn(None, [], 'mkdir', '-m', svntest.main.make_log_msg(), (sbox.repo_url + ('/dir-%i' % i)))
expected_full = ['* Copied revisions from 0 to 2.\n', '* Copied revisions from 3 to 5.\n', '* Copied revision 6.\n', '* Copied revision 7.\n']
expected_incremental = ['* Copied revisions from 3 to 5.\n', '* Copied revision 6.\n', '* Copied revision 7.\n']
(backup_dir, backup_url) = sbox.add_repo_path('backup-3')
svntest.actions.run_and_verify_svnadmin(expected_full, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(expected_incremental, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir) | -1,912,052,797,931,870,700 | hotcopy progress reporting | subversion/tests/cmdline/svnadmin_tests.py | fsfs_hotcopy_progress | auycro/subversion | python | @SkipUnless(svntest.main.is_fs_type_fsfs)
@SkipUnless(svntest.main.fs_has_pack)
def fsfs_hotcopy_progress(sbox):
if svntest.main.options.fsfs_packing:
raise svntest.Skip('fsfs packing set')
sbox.build(create_wc=False, empty=True)
patch_format(sbox.repo_dir, shard_size=3)
(inc_backup_dir, inc_backup_url) = sbox.add_repo_path('incremental-backup')
expected_full = ['* Copied revision 0.\n']
expected_incremental = ['* Copied revision 0.\n']
(backup_dir, backup_url) = sbox.add_repo_path('backup-0')
svntest.actions.run_and_verify_svnadmin(expected_full, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(expected_incremental, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir)
for i in range(3):
svntest.actions.run_and_verify_svn(None, [], 'mkdir', '-m', svntest.main.make_log_msg(), (sbox.repo_url + ('/dir-%i' % i)))
expected_full = ['* Copied revision 0.\n', '* Copied revision 1.\n', '* Copied revision 2.\n', '* Copied revision 3.\n']
expected_incremental = ['* Copied revision 1.\n', '* Copied revision 2.\n', '* Copied revision 3.\n']
(backup_dir, backup_url) = sbox.add_repo_path('backup-1')
svntest.actions.run_and_verify_svnadmin(expected_full, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(expected_incremental, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir)
svntest.actions.run_and_verify_svnadmin(None, [], 'pack', sbox.repo_dir)
expected_full = ['* Copied revisions from 0 to 2.\n', '* Copied revision 3.\n']
expected_incremental = ['* Copied revisions from 0 to 2.\n']
(backup_dir, backup_url) = sbox.add_repo_path('backup-2')
svntest.actions.run_and_verify_svnadmin(expected_full, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(expected_incremental, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir)
for i in range(4, 6):
svntest.actions.run_and_verify_svn(None, [], 'mkdir', '-m', svntest.main.make_log_msg(), (sbox.repo_url + ('/dir-%i' % i)))
svntest.actions.run_and_verify_svnadmin(None, [], 'pack', sbox.repo_dir)
for i in range(6, 8):
svntest.actions.run_and_verify_svn(None, [], 'mkdir', '-m', svntest.main.make_log_msg(), (sbox.repo_url + ('/dir-%i' % i)))
expected_full = ['* Copied revisions from 0 to 2.\n', '* Copied revisions from 3 to 5.\n', '* Copied revision 6.\n', '* Copied revision 7.\n']
expected_incremental = ['* Copied revisions from 3 to 5.\n', '* Copied revision 6.\n', '* Copied revision 7.\n']
(backup_dir, backup_url) = sbox.add_repo_path('backup-3')
svntest.actions.run_and_verify_svnadmin(expected_full, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(expected_incremental, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir) |
@SkipUnless(svntest.main.is_fs_type_fsfs)
def fsfs_hotcopy_progress_with_revprop_changes(sbox):
'incremental hotcopy progress with changed revprops'
if svntest.main.options.fsfs_packing:
raise svntest.Skip('fsfs packing set')
sbox.build(create_wc=False, empty=True)
for i in range(6):
svntest.actions.run_and_verify_svn(None, [], 'mkdir', '-m', svntest.main.make_log_msg(), (sbox.repo_url + ('/dir-%i' % i)))
expected_output = ['* Copied revision 0.\n', '* Copied revision 1.\n', '* Copied revision 2.\n', '* Copied revision 3.\n', '* Copied revision 4.\n', '* Copied revision 5.\n', '* Copied revision 6.\n']
(backup_dir, backup_url) = sbox.add_repo_path('backup')
svntest.actions.run_and_verify_svnadmin(expected_output, [], 'hotcopy', sbox.repo_dir, backup_dir)
revprop_file = sbox.get_tempname()
svntest.main.file_write(revprop_file, 'Modified log message.')
for i in [1, 3, 6]:
svntest.actions.run_and_verify_svnadmin(None, [], 'setrevprop', sbox.repo_dir, '-r', i, 'svn:log', revprop_file)
expected_output = ['* Copied revision 1.\n', '* Copied revision 3.\n', '* Copied revision 6.\n']
svntest.actions.run_and_verify_svnadmin(expected_output, [], 'hotcopy', '--incremental', sbox.repo_dir, backup_dir) | -8,229,865,375,370,251,000 | incremental hotcopy progress with changed revprops | subversion/tests/cmdline/svnadmin_tests.py | fsfs_hotcopy_progress_with_revprop_changes | auycro/subversion | python | @SkipUnless(svntest.main.is_fs_type_fsfs)
def fsfs_hotcopy_progress_with_revprop_changes(sbox):
if svntest.main.options.fsfs_packing:
raise svntest.Skip('fsfs packing set')
sbox.build(create_wc=False, empty=True)
for i in range(6):
svntest.actions.run_and_verify_svn(None, [], 'mkdir', '-m', svntest.main.make_log_msg(), (sbox.repo_url + ('/dir-%i' % i)))
expected_output = ['* Copied revision 0.\n', '* Copied revision 1.\n', '* Copied revision 2.\n', '* Copied revision 3.\n', '* Copied revision 4.\n', '* Copied revision 5.\n', '* Copied revision 6.\n']
(backup_dir, backup_url) = sbox.add_repo_path('backup')
svntest.actions.run_and_verify_svnadmin(expected_output, [], 'hotcopy', sbox.repo_dir, backup_dir)
revprop_file = sbox.get_tempname()
svntest.main.file_write(revprop_file, 'Modified log message.')
for i in [1, 3, 6]:
svntest.actions.run_and_verify_svnadmin(None, [], 'setrevprop', sbox.repo_dir, '-r', i, 'svn:log', revprop_file)
expected_output = ['* Copied revision 1.\n', '* Copied revision 3.\n', '* Copied revision 6.\n']
svntest.actions.run_and_verify_svnadmin(expected_output, [], 'hotcopy', '--incremental', sbox.repo_dir, backup_dir) |
@SkipUnless(svntest.main.is_fs_type_fsfs)
def fsfs_hotcopy_progress_old(sbox):
'hotcopy --compatible-version=1.3 progress'
sbox.build(create_wc=False, empty=True, minor_version=3)
(inc_backup_dir, inc_backup_url) = sbox.add_repo_path('incremental-backup')
expected_full = ['* Copied revision 0.\n']
expected_incremental = ['* Copied revision 0.\n']
(backup_dir, backup_url) = sbox.add_repo_path('backup-0')
svntest.actions.run_and_verify_svnadmin(expected_full, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(expected_incremental, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir)
for i in range(3):
svntest.actions.run_and_verify_svn(None, [], 'mkdir', '-m', svntest.main.make_log_msg(), (sbox.repo_url + ('/dir-%i' % i)))
expected_full = ['* Copied revision 0.\n', '* Copied revision 1.\n', '* Copied revision 2.\n', '* Copied revision 3.\n']
expected_incremental = ['* Copied revision 1.\n', '* Copied revision 2.\n', '* Copied revision 3.\n']
(backup_dir, backup_url) = sbox.add_repo_path('backup-1')
svntest.actions.run_and_verify_svnadmin(expected_full, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(expected_incremental, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir) | -878,062,420,884,698,800 | hotcopy --compatible-version=1.3 progress | subversion/tests/cmdline/svnadmin_tests.py | fsfs_hotcopy_progress_old | auycro/subversion | python | @SkipUnless(svntest.main.is_fs_type_fsfs)
def fsfs_hotcopy_progress_old(sbox):
sbox.build(create_wc=False, empty=True, minor_version=3)
(inc_backup_dir, inc_backup_url) = sbox.add_repo_path('incremental-backup')
expected_full = ['* Copied revision 0.\n']
expected_incremental = ['* Copied revision 0.\n']
(backup_dir, backup_url) = sbox.add_repo_path('backup-0')
svntest.actions.run_and_verify_svnadmin(expected_full, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(expected_incremental, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir)
for i in range(3):
svntest.actions.run_and_verify_svn(None, [], 'mkdir', '-m', svntest.main.make_log_msg(), (sbox.repo_url + ('/dir-%i' % i)))
expected_full = ['* Copied revision 0.\n', '* Copied revision 1.\n', '* Copied revision 2.\n', '* Copied revision 3.\n']
expected_incremental = ['* Copied revision 1.\n', '* Copied revision 2.\n', '* Copied revision 3.\n']
(backup_dir, backup_url) = sbox.add_repo_path('backup-1')
svntest.actions.run_and_verify_svnadmin(expected_full, [], 'hotcopy', sbox.repo_dir, backup_dir)
svntest.actions.run_and_verify_svnadmin(expected_incremental, [], 'hotcopy', '--incremental', sbox.repo_dir, inc_backup_dir) |
@SkipUnless(svntest.main.fs_has_unique_freeze)
def freeze_same_uuid(sbox):
'freeze multiple repositories with same UUID'
sbox.build(create_wc=False)
(first_repo_dir, _) = sbox.add_repo_path('first')
(second_repo_dir, _) = sbox.add_repo_path('second')
svntest.main.create_repos(first_repo_dir)
svntest.main.create_repos(second_repo_dir)
dump_path = os.path.join(os.path.dirname(sys.argv[0]), 'svnadmin_tests_data', 'skeleton_repos.dump')
dump_contents = open(dump_path, 'rb').readlines()
svntest.actions.run_and_verify_load(first_repo_dir, dump_contents)
svntest.actions.run_and_verify_load(second_repo_dir, dump_contents)
arg_file = sbox.get_tempname()
svntest.main.file_write(arg_file, ('%s\n%s\n' % (first_repo_dir, second_repo_dir)))
svntest.actions.run_and_verify_svnadmin(None, None, 'freeze', '-F', arg_file, '--', sys.executable, '-c', 'True') | -594,798,748,947,338,800 | freeze multiple repositories with same UUID | subversion/tests/cmdline/svnadmin_tests.py | freeze_same_uuid | auycro/subversion | python | @SkipUnless(svntest.main.fs_has_unique_freeze)
def freeze_same_uuid(sbox):
sbox.build(create_wc=False)
(first_repo_dir, _) = sbox.add_repo_path('first')
(second_repo_dir, _) = sbox.add_repo_path('second')
svntest.main.create_repos(first_repo_dir)
svntest.main.create_repos(second_repo_dir)
dump_path = os.path.join(os.path.dirname(sys.argv[0]), 'svnadmin_tests_data', 'skeleton_repos.dump')
dump_contents = open(dump_path, 'rb').readlines()
svntest.actions.run_and_verify_load(first_repo_dir, dump_contents)
svntest.actions.run_and_verify_load(second_repo_dir, dump_contents)
arg_file = sbox.get_tempname()
svntest.main.file_write(arg_file, ('%s\n%s\n' % (first_repo_dir, second_repo_dir)))
svntest.actions.run_and_verify_svnadmin(None, None, 'freeze', '-F', arg_file, '--', sys.executable, '-c', 'True') |
@Skip(svntest.main.is_fs_type_fsx)
def upgrade(sbox):
'upgrade --compatible-version=1.3'
sbox.build(create_wc=False, minor_version=3)
svntest.actions.run_and_verify_svnadmin(None, [], 'upgrade', sbox.repo_dir)
svntest.actions.run_and_verify_svn(['Committing transaction...\n', 'Committed revision 2.\n'], [], 'mkdir', '-m', svntest.main.make_log_msg(), (sbox.repo_url + '/dir')) | -4,504,573,146,459,374,000 | upgrade --compatible-version=1.3 | subversion/tests/cmdline/svnadmin_tests.py | upgrade | auycro/subversion | python | @Skip(svntest.main.is_fs_type_fsx)
def upgrade(sbox):
sbox.build(create_wc=False, minor_version=3)
svntest.actions.run_and_verify_svnadmin(None, [], 'upgrade', sbox.repo_dir)
svntest.actions.run_and_verify_svn(['Committing transaction...\n', 'Committed revision 2.\n'], [], 'mkdir', '-m', svntest.main.make_log_msg(), (sbox.repo_url + '/dir')) |
def load_txdelta(sbox):
'exercising svn_txdelta_target on BDB'
sbox.build(empty=True)
dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]), 'svnadmin_tests_data', 'load_txdelta.dump.gz')
dumpfile = gzip.open(dumpfile_location, 'rb').readlines()
load_dumpstream(sbox, dumpfile)
(exit_code, output, errput) = svntest.main.run_svnadmin('verify', sbox.repo_dir)
if errput:
raise SVNUnexpectedStderr(errput)
if svntest.verify.verify_outputs("Output of 'svnadmin verify' is unexpected.", None, output, None, '.*Verified revision *'):
raise svntest.Failure | -2,468,967,267,577,584,600 | exercising svn_txdelta_target on BDB | subversion/tests/cmdline/svnadmin_tests.py | load_txdelta | auycro/subversion | python | def load_txdelta(sbox):
sbox.build(empty=True)
dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]), 'svnadmin_tests_data', 'load_txdelta.dump.gz')
dumpfile = gzip.open(dumpfile_location, 'rb').readlines()
load_dumpstream(sbox, dumpfile)
(exit_code, output, errput) = svntest.main.run_svnadmin('verify', sbox.repo_dir)
if errput:
raise SVNUnexpectedStderr(errput)
if svntest.verify.verify_outputs("Output of 'svnadmin verify' is unexpected.", None, output, None, '.*Verified revision *'):
raise svntest.Failure |
@Issues(4563)
def load_no_svndate_r0(sbox):
'load without svn:date on r0'
sbox.build(create_wc=False, empty=True)
svntest.actions.run_and_verify_svnlook([' svn:date\n'], [], 'proplist', '--revprop', '-r0', sbox.repo_dir)
dump_old = [b'SVN-fs-dump-format-version: 2\n', b'\n', b'UUID: bf52886d-358d-4493-a414-944a6e5ad4f5\n', b'\n', b'Revision-number: 0\n', b'Prop-content-length: 10\n', b'Content-length: 10\n', b'\n', b'PROPS-END\n', b'\n']
svntest.actions.run_and_verify_load(sbox.repo_dir, dump_old)
svntest.actions.run_and_verify_svnlook([], [], 'proplist', '--revprop', '-r0', sbox.repo_dir) | -9,160,021,234,037,082,000 | load without svn:date on r0 | subversion/tests/cmdline/svnadmin_tests.py | load_no_svndate_r0 | auycro/subversion | python | @Issues(4563)
def load_no_svndate_r0(sbox):
sbox.build(create_wc=False, empty=True)
svntest.actions.run_and_verify_svnlook([' svn:date\n'], [], 'proplist', '--revprop', '-r0', sbox.repo_dir)
dump_old = [b'SVN-fs-dump-format-version: 2\n', b'\n', b'UUID: bf52886d-358d-4493-a414-944a6e5ad4f5\n', b'\n', b'Revision-number: 0\n', b'Prop-content-length: 10\n', b'Content-length: 10\n', b'\n', b'PROPS-END\n', b'\n']
svntest.actions.run_and_verify_load(sbox.repo_dir, dump_old)
svntest.actions.run_and_verify_svnlook([], [], 'proplist', '--revprop', '-r0', sbox.repo_dir) |
@SkipUnless(svntest.main.is_fs_type_fsfs)
def hotcopy_read_only(sbox):
"'svnadmin hotcopy' a read-only source repository"
sbox.build()
svntest.main.chmod_tree(sbox.repo_dir, 0, svntest.main.S_ALL_WRITE)
(backup_dir, backup_url) = sbox.add_repo_path('backup')
(exit_code, output, errput) = svntest.main.run_svnadmin('hotcopy', sbox.repo_dir, backup_dir)
svntest.main.chmod_tree(sbox.repo_dir, svntest.main.S_ALL_WRITE, svntest.main.S_ALL_WRITE)
if errput:
logger.warn('Error: hotcopy failed')
raise SVNUnexpectedStderr(errput) | -8,641,223,759,628,086,000 | 'svnadmin hotcopy' a read-only source repository | subversion/tests/cmdline/svnadmin_tests.py | hotcopy_read_only | auycro/subversion | python | @SkipUnless(svntest.main.is_fs_type_fsfs)
def hotcopy_read_only(sbox):
sbox.build()
svntest.main.chmod_tree(sbox.repo_dir, 0, svntest.main.S_ALL_WRITE)
(backup_dir, backup_url) = sbox.add_repo_path('backup')
(exit_code, output, errput) = svntest.main.run_svnadmin('hotcopy', sbox.repo_dir, backup_dir)
svntest.main.chmod_tree(sbox.repo_dir, svntest.main.S_ALL_WRITE, svntest.main.S_ALL_WRITE)
if errput:
logger.warn('Error: hotcopy failed')
raise SVNUnexpectedStderr(errput) |
@SkipUnless(svntest.main.is_fs_type_fsfs)
@SkipUnless(svntest.main.fs_has_pack)
def fsfs_pack_non_sharded(sbox):
"'svnadmin pack' on a non-sharded repository"
sbox.build(create_wc=False, minor_version=min(svntest.main.options.server_minor_version, 3))
if is_sharded(sbox.repo_dir):
raise svntest.Skip('sharded pre-cooked repository')
svntest.actions.run_and_verify_svnadmin(None, [], 'upgrade', sbox.repo_dir)
svntest.actions.run_and_verify_svnadmin(['svnadmin: Warning - this repository is not sharded. Packing has no effect.\n'], [], 'pack', sbox.repo_dir) | 3,323,902,558,173,068,000 | 'svnadmin pack' on a non-sharded repository | subversion/tests/cmdline/svnadmin_tests.py | fsfs_pack_non_sharded | auycro/subversion | python | @SkipUnless(svntest.main.is_fs_type_fsfs)
@SkipUnless(svntest.main.fs_has_pack)
def fsfs_pack_non_sharded(sbox):
sbox.build(create_wc=False, minor_version=min(svntest.main.options.server_minor_version, 3))
if is_sharded(sbox.repo_dir):
raise svntest.Skip('sharded pre-cooked repository')
svntest.actions.run_and_verify_svnadmin(None, [], 'upgrade', sbox.repo_dir)
svntest.actions.run_and_verify_svnadmin(['svnadmin: Warning - this repository is not sharded. Packing has no effect.\n'], [], 'pack', sbox.repo_dir) |
def load_revprops(sbox):
'svnadmin load-revprops'
sbox.build(create_wc=False, empty=True)
dump_path = os.path.join(os.path.dirname(sys.argv[0]), 'svnadmin_tests_data', 'skeleton_repos.dump')
dump_contents = open(dump_path, 'rb').readlines()
load_and_verify_dumpstream(sbox, None, [], None, False, dump_contents)
svntest.actions.run_and_verify_svnlook(['Initial setup...\n', '\n'], [], 'log', '-r1', sbox.repo_dir)
input_file = sbox.get_tempname()
svntest.main.file_write(input_file, 'Modified log message...\n')
svntest.actions.run_and_verify_svnadmin([], [], 'setlog', '--bypass-hooks', '-r1', sbox.repo_dir, input_file)
svntest.actions.run_and_verify_svnlook(['Modified log message...\n', '\n'], [], 'log', '-r1', sbox.repo_dir)
svntest.main.run_command_stdin(svntest.main.svnadmin_binary, None, 0, True, dump_contents, 'load-revprops', sbox.repo_dir)
svntest.actions.run_and_verify_svnlook(['Initial setup...\n', '\n'], [], 'log', '-r1', sbox.repo_dir) | 8,441,304,003,097,096,000 | svnadmin load-revprops | subversion/tests/cmdline/svnadmin_tests.py | load_revprops | auycro/subversion | python | def load_revprops(sbox):
sbox.build(create_wc=False, empty=True)
dump_path = os.path.join(os.path.dirname(sys.argv[0]), 'svnadmin_tests_data', 'skeleton_repos.dump')
dump_contents = open(dump_path, 'rb').readlines()
load_and_verify_dumpstream(sbox, None, [], None, False, dump_contents)
svntest.actions.run_and_verify_svnlook(['Initial setup...\n', '\n'], [], 'log', '-r1', sbox.repo_dir)
input_file = sbox.get_tempname()
svntest.main.file_write(input_file, 'Modified log message...\n')
svntest.actions.run_and_verify_svnadmin([], [], 'setlog', '--bypass-hooks', '-r1', sbox.repo_dir, input_file)
svntest.actions.run_and_verify_svnlook(['Modified log message...\n', '\n'], [], 'log', '-r1', sbox.repo_dir)
svntest.main.run_command_stdin(svntest.main.svnadmin_binary, None, 0, True, dump_contents, 'load-revprops', sbox.repo_dir)
svntest.actions.run_and_verify_svnlook(['Initial setup...\n', '\n'], [], 'log', '-r1', sbox.repo_dir) |
def dump_revprops(sbox):
'svnadmin dump-revprops'
sbox.build(create_wc=False)
(exit_code, dump_contents, errput) = svntest.actions.run_and_verify_svnadmin(None, [], 'dump-revprops', '-q', sbox.repo_dir)
for line in dump_contents:
if (line.find(b'Node-path: ') > (- 1)):
logger.warn('Error: path change found in revprops-only dump.')
raise svntest.Failure
(exit_code, log_msg, errput) = svntest.actions.run_and_verify_svnlook(None, [], 'log', '-r1', sbox.repo_dir)
input_file = sbox.get_tempname()
svntest.main.file_write(input_file, 'Modified log message...\n')
svntest.actions.run_and_verify_svnadmin([], [], 'setlog', '--bypass-hooks', '-r1', sbox.repo_dir, input_file)
svntest.actions.run_and_verify_svnlook(['Modified log message...\n', '\n'], [], 'log', '-r1', sbox.repo_dir)
svntest.main.run_command_stdin(svntest.main.svnadmin_binary, None, 0, True, dump_contents, 'load-revprops', sbox.repo_dir)
svntest.actions.run_and_verify_svnlook(log_msg, [], 'log', '-r1', sbox.repo_dir) | 8,094,951,945,586,859,000 | svnadmin dump-revprops | subversion/tests/cmdline/svnadmin_tests.py | dump_revprops | auycro/subversion | python | def dump_revprops(sbox):
sbox.build(create_wc=False)
(exit_code, dump_contents, errput) = svntest.actions.run_and_verify_svnadmin(None, [], 'dump-revprops', '-q', sbox.repo_dir)
for line in dump_contents:
if (line.find(b'Node-path: ') > (- 1)):
logger.warn('Error: path change found in revprops-only dump.')
raise svntest.Failure
(exit_code, log_msg, errput) = svntest.actions.run_and_verify_svnlook(None, [], 'log', '-r1', sbox.repo_dir)
input_file = sbox.get_tempname()
svntest.main.file_write(input_file, 'Modified log message...\n')
svntest.actions.run_and_verify_svnadmin([], [], 'setlog', '--bypass-hooks', '-r1', sbox.repo_dir, input_file)
svntest.actions.run_and_verify_svnlook(['Modified log message...\n', '\n'], [], 'log', '-r1', sbox.repo_dir)
svntest.main.run_command_stdin(svntest.main.svnadmin_binary, None, 0, True, dump_contents, 'load-revprops', sbox.repo_dir)
svntest.actions.run_and_verify_svnlook(log_msg, [], 'log', '-r1', sbox.repo_dir) |
@XFail(svntest.main.is_fs_type_fsx)
@Issue(4598)
def dump_no_op_change(sbox):
'svnadmin dump with no-op changes'
sbox.build(create_wc=False, empty=True)
empty_file = sbox.get_tempname()
svntest.main.file_write(empty_file, '')
svntest.actions.run_and_verify_svnmucc(None, [], '-U', sbox.repo_url, '-m', svntest.main.make_log_msg(), 'put', empty_file, 'bar')
svntest.actions.run_and_verify_svnmucc(None, [], '-U', sbox.repo_url, '-m', svntest.main.make_log_msg(), 'put', empty_file, 'bar')
(_, dump, _) = svntest.actions.run_and_verify_svnadmin(None, [], 'dump', '-q', sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
(_, expected, _) = svntest.actions.run_and_verify_svn(None, [], 'log', '-v', '-r2', sbox.repo_url)
found = [True for line in expected if (line.find('M /bar\n') != (- 1))]
if (not found):
raise svntest.Failure
svntest.actions.run_and_verify_svn(expected, [], 'log', '-v', '-r2', sbox2.repo_url)
(_, expected, _) = svntest.actions.run_and_verify_svn(None, [], 'log', '-v', (sbox.repo_url + '/bar'))
found = [True for line in expected if (line.find('M /bar\n') != (- 1))]
if (not found):
raise svntest.Failure
svntest.actions.run_and_verify_svn(expected, [], 'log', '-v', (sbox2.repo_url + '/bar')) | 4,759,590,128,040,377,000 | svnadmin dump with no-op changes | subversion/tests/cmdline/svnadmin_tests.py | dump_no_op_change | auycro/subversion | python | @XFail(svntest.main.is_fs_type_fsx)
@Issue(4598)
def dump_no_op_change(sbox):
sbox.build(create_wc=False, empty=True)
empty_file = sbox.get_tempname()
svntest.main.file_write(empty_file, '')
svntest.actions.run_and_verify_svnmucc(None, [], '-U', sbox.repo_url, '-m', svntest.main.make_log_msg(), 'put', empty_file, 'bar')
svntest.actions.run_and_verify_svnmucc(None, [], '-U', sbox.repo_url, '-m', svntest.main.make_log_msg(), 'put', empty_file, 'bar')
(_, dump, _) = svntest.actions.run_and_verify_svnadmin(None, [], 'dump', '-q', sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
(_, expected, _) = svntest.actions.run_and_verify_svn(None, [], 'log', '-v', '-r2', sbox.repo_url)
found = [True for line in expected if (line.find('M /bar\n') != (- 1))]
if (not found):
raise svntest.Failure
svntest.actions.run_and_verify_svn(expected, [], 'log', '-v', '-r2', sbox2.repo_url)
(_, expected, _) = svntest.actions.run_and_verify_svn(None, [], 'log', '-v', (sbox.repo_url + '/bar'))
found = [True for line in expected if (line.find('M /bar\n') != (- 1))]
if (not found):
raise svntest.Failure
svntest.actions.run_and_verify_svn(expected, [], 'log', '-v', (sbox2.repo_url + '/bar')) |
@XFail(svntest.main.is_fs_type_bdb)
@XFail(svntest.main.is_fs_type_fsx)
@Issue(4623)
def dump_no_op_prop_change(sbox):
'svnadmin dump with no-op property change'
sbox.build(create_wc=False, empty=True)
empty_file = sbox.get_tempname()
svntest.main.file_write(empty_file, '')
svntest.actions.run_and_verify_svnmucc(None, [], '-U', sbox.repo_url, '-m', svntest.main.make_log_msg(), 'put', empty_file, 'bar', 'propset', 'pname', 'pval', 'bar')
svntest.actions.run_and_verify_svnmucc(None, [], '-U', sbox.repo_url, '-m', svntest.main.make_log_msg(), 'propset', 'pname', 'pval', 'bar')
(_, dump, _) = svntest.actions.run_and_verify_svnadmin(None, [], 'dump', '-q', sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
(_, expected, _) = svntest.actions.run_and_verify_svn(None, [], 'log', '-v', '-r2', sbox.repo_url)
found = [True for line in expected if (line.find('M /bar\n') != (- 1))]
if (not found):
raise svntest.Failure
svntest.actions.run_and_verify_svn(expected, [], 'log', '-v', '-r2', sbox2.repo_url)
(_, expected, _) = svntest.actions.run_and_verify_svn(None, [], 'log', '-v', (sbox.repo_url + '/bar'))
found = [True for line in expected if (line.find('M /bar\n') != (- 1))]
if (not found):
raise svntest.Failure
svntest.actions.run_and_verify_svn(expected, [], 'log', '-v', (sbox2.repo_url + '/bar')) | 881,411,580,517,528,000 | svnadmin dump with no-op property change | subversion/tests/cmdline/svnadmin_tests.py | dump_no_op_prop_change | auycro/subversion | python | @XFail(svntest.main.is_fs_type_bdb)
@XFail(svntest.main.is_fs_type_fsx)
@Issue(4623)
def dump_no_op_prop_change(sbox):
sbox.build(create_wc=False, empty=True)
empty_file = sbox.get_tempname()
svntest.main.file_write(empty_file, '')
svntest.actions.run_and_verify_svnmucc(None, [], '-U', sbox.repo_url, '-m', svntest.main.make_log_msg(), 'put', empty_file, 'bar', 'propset', 'pname', 'pval', 'bar')
svntest.actions.run_and_verify_svnmucc(None, [], '-U', sbox.repo_url, '-m', svntest.main.make_log_msg(), 'propset', 'pname', 'pval', 'bar')
(_, dump, _) = svntest.actions.run_and_verify_svnadmin(None, [], 'dump', '-q', sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
(_, expected, _) = svntest.actions.run_and_verify_svn(None, [], 'log', '-v', '-r2', sbox.repo_url)
found = [True for line in expected if (line.find('M /bar\n') != (- 1))]
if (not found):
raise svntest.Failure
svntest.actions.run_and_verify_svn(expected, [], 'log', '-v', '-r2', sbox2.repo_url)
(_, expected, _) = svntest.actions.run_and_verify_svn(None, [], 'log', '-v', (sbox.repo_url + '/bar'))
found = [True for line in expected if (line.find('M /bar\n') != (- 1))]
if (not found):
raise svntest.Failure
svntest.actions.run_and_verify_svn(expected, [], 'log', '-v', (sbox2.repo_url + '/bar')) |
def load_no_flush_to_disk(sbox):
'svnadmin load --no-flush-to-disk'
sbox.build(empty=True)
dump = clean_dumpfile()
expected = [svntest.wc.State('', {'A': svntest.wc.StateItem(contents='text\n', props={'svn:keywords': 'Id'})})]
load_and_verify_dumpstream(sbox, [], [], expected, True, dump, '--no-flush-to-disk', '--ignore-uuid') | -502,437,378,718,735,040 | svnadmin load --no-flush-to-disk | subversion/tests/cmdline/svnadmin_tests.py | load_no_flush_to_disk | auycro/subversion | python | def load_no_flush_to_disk(sbox):
sbox.build(empty=True)
dump = clean_dumpfile()
expected = [svntest.wc.State('', {'A': svntest.wc.StateItem(contents='text\n', props={'svn:keywords': 'Id'})})]
load_and_verify_dumpstream(sbox, [], [], expected, True, dump, '--no-flush-to-disk', '--ignore-uuid') |
def dump_to_file(sbox):
'svnadmin dump --file ARG'
sbox.build(create_wc=False, empty=False)
expected_dump = svntest.actions.run_and_verify_dump(sbox.repo_dir)
file = sbox.get_tempname()
svntest.actions.run_and_verify_svnadmin2([], ['* Dumped revision 0.\n', '* Dumped revision 1.\n'], 0, 'dump', '--file', file, sbox.repo_dir)
actual_dump = open(file, 'rb').readlines()
svntest.verify.compare_dump_files(None, None, expected_dump, actual_dump)
file = sbox.get_tempname()
svntest.main.file_write(file, '')
svntest.actions.run_and_verify_svnadmin2([], ['* Dumped revision 0.\n', '* Dumped revision 1.\n'], 0, 'dump', '--file', file, sbox.repo_dir)
actual_dump = open(file, 'rb').readlines()
svntest.verify.compare_dump_files(None, None, expected_dump, actual_dump) | -7,564,124,315,480,732,000 | svnadmin dump --file ARG | subversion/tests/cmdline/svnadmin_tests.py | dump_to_file | auycro/subversion | python | def dump_to_file(sbox):
sbox.build(create_wc=False, empty=False)
expected_dump = svntest.actions.run_and_verify_dump(sbox.repo_dir)
file = sbox.get_tempname()
svntest.actions.run_and_verify_svnadmin2([], ['* Dumped revision 0.\n', '* Dumped revision 1.\n'], 0, 'dump', '--file', file, sbox.repo_dir)
actual_dump = open(file, 'rb').readlines()
svntest.verify.compare_dump_files(None, None, expected_dump, actual_dump)
file = sbox.get_tempname()
svntest.main.file_write(file, '')
svntest.actions.run_and_verify_svnadmin2([], ['* Dumped revision 0.\n', '* Dumped revision 1.\n'], 0, 'dump', '--file', file, sbox.repo_dir)
actual_dump = open(file, 'rb').readlines()
svntest.verify.compare_dump_files(None, None, expected_dump, actual_dump) |
def load_from_file(sbox):
'svnadmin load --file ARG'
sbox.build(empty=True)
file = sbox.get_tempname()
with open(file, 'wb') as f:
f.writelines(clean_dumpfile())
svntest.actions.run_and_verify_svnadmin2(None, [], 0, 'load', '--file', file, '--ignore-uuid', sbox.repo_dir)
expected_tree = svntest.wc.State('', {'A': svntest.wc.StateItem(contents='text\n', props={'svn:keywords': 'Id'})})
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'update', sbox.wc_dir)
svntest.actions.verify_disk(sbox.wc_dir, expected_tree, check_props=True) | 8,242,333,286,280,110,000 | svnadmin load --file ARG | subversion/tests/cmdline/svnadmin_tests.py | load_from_file | auycro/subversion | python | def load_from_file(sbox):
sbox.build(empty=True)
file = sbox.get_tempname()
with open(file, 'wb') as f:
f.writelines(clean_dumpfile())
svntest.actions.run_and_verify_svnadmin2(None, [], 0, 'load', '--file', file, '--ignore-uuid', sbox.repo_dir)
expected_tree = svntest.wc.State('', {'A': svntest.wc.StateItem(contents='text\n', props={'svn:keywords': 'Id'})})
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'update', sbox.wc_dir)
svntest.actions.verify_disk(sbox.wc_dir, expected_tree, check_props=True) |
def dump_exclude(sbox):
'svnadmin dump with excluded paths'
sbox.build(create_wc=False)
(_, dump, _) = svntest.actions.run_and_verify_svnadmin(None, [], 'dump', '-q', '--exclude', '/A/D/H', '--exclude', '/A/B/E', sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
expected_output = svntest.verify.RegexListOutput(['-+\\n', 'r1\\ .*\n', re.escape('Changed paths:\n'), re.escape(' A /A\n'), re.escape(' A /A/B\n'), re.escape(' A /A/B/F\n'), re.escape(' A /A/B/lambda\n'), re.escape(' A /A/C\n'), re.escape(' A /A/D\n'), re.escape(' A /A/D/G\n'), re.escape(' A /A/D/G/pi\n'), re.escape(' A /A/D/G/rho\n'), re.escape(' A /A/D/G/tau\n'), re.escape(' A /A/D/gamma\n'), re.escape(' A /A/mu\n'), re.escape(' A /iota\n'), '-+\\n'])
svntest.actions.run_and_verify_svn(expected_output, [], 'log', '-v', '-q', sbox2.repo_url) | -6,663,219,952,791,313,000 | svnadmin dump with excluded paths | subversion/tests/cmdline/svnadmin_tests.py | dump_exclude | auycro/subversion | python | def dump_exclude(sbox):
sbox.build(create_wc=False)
(_, dump, _) = svntest.actions.run_and_verify_svnadmin(None, [], 'dump', '-q', '--exclude', '/A/D/H', '--exclude', '/A/B/E', sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
expected_output = svntest.verify.RegexListOutput(['-+\\n', 'r1\\ .*\n', re.escape('Changed paths:\n'), re.escape(' A /A\n'), re.escape(' A /A/B\n'), re.escape(' A /A/B/F\n'), re.escape(' A /A/B/lambda\n'), re.escape(' A /A/C\n'), re.escape(' A /A/D\n'), re.escape(' A /A/D/G\n'), re.escape(' A /A/D/G/pi\n'), re.escape(' A /A/D/G/rho\n'), re.escape(' A /A/D/G/tau\n'), re.escape(' A /A/D/gamma\n'), re.escape(' A /A/mu\n'), re.escape(' A /iota\n'), '-+\\n'])
svntest.actions.run_and_verify_svn(expected_output, [], 'log', '-v', '-q', sbox2.repo_url) |
def dump_exclude_copysource(sbox):
'svnadmin dump with excluded copysource'
sbox.build(create_wc=False, empty=True)
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'mkdir', (sbox.repo_url + '/trunk'), (sbox.repo_url + '/branches'), (sbox.repo_url + '/tags'), '-m', 'Create repository structure.')
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'copy', (sbox.repo_url + '/trunk'), (sbox.repo_url + '/branches/branch1'), '-m', 'Create branch.')
(_, dump, _) = svntest.actions.run_and_verify_svnadmin(None, [], 'dump', '-q', '--exclude', '/trunk', sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
expected_output = svntest.verify.RegexListOutput(['-+\\n', 'r2\\ .*\n', re.escape('Changed paths:\n'), re.escape(' A /branches/branch1\n'), '-+\\n', 'r1\\ .*\n', re.escape('Changed paths:\n'), re.escape(' A /branches\n'), re.escape(' A /tags\n'), '-+\\n'])
svntest.actions.run_and_verify_svn(expected_output, [], 'log', '-v', '-q', sbox2.repo_url) | -7,391,016,316,495,740,000 | svnadmin dump with excluded copysource | subversion/tests/cmdline/svnadmin_tests.py | dump_exclude_copysource | auycro/subversion | python | def dump_exclude_copysource(sbox):
sbox.build(create_wc=False, empty=True)
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'mkdir', (sbox.repo_url + '/trunk'), (sbox.repo_url + '/branches'), (sbox.repo_url + '/tags'), '-m', 'Create repository structure.')
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'copy', (sbox.repo_url + '/trunk'), (sbox.repo_url + '/branches/branch1'), '-m', 'Create branch.')
(_, dump, _) = svntest.actions.run_and_verify_svnadmin(None, [], 'dump', '-q', '--exclude', '/trunk', sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
expected_output = svntest.verify.RegexListOutput(['-+\\n', 'r2\\ .*\n', re.escape('Changed paths:\n'), re.escape(' A /branches/branch1\n'), '-+\\n', 'r1\\ .*\n', re.escape('Changed paths:\n'), re.escape(' A /branches\n'), re.escape(' A /tags\n'), '-+\\n'])
svntest.actions.run_and_verify_svn(expected_output, [], 'log', '-v', '-q', sbox2.repo_url) |
def dump_include(sbox):
'svnadmin dump with included paths'
sbox.build(create_wc=False, empty=True)
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'mkdir', (sbox.repo_url + '/A'), (sbox.repo_url + '/B'), (sbox.repo_url + '/C'), '-m', 'Create folder.')
(_, dump, _) = svntest.actions.run_and_verify_svnadmin(None, [], 'dump', '-q', '--include', '/A', '--include', '/C', sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
expected_output = svntest.verify.RegexListOutput(['-+\\n', 'r1\\ .*\n', re.escape('Changed paths:\n'), re.escape(' A /A\n'), re.escape(' A /C\n'), '-+\\n'])
svntest.actions.run_and_verify_svn(expected_output, [], 'log', '-v', '-q', sbox2.repo_url) | 8,657,263,414,146,474,000 | svnadmin dump with included paths | subversion/tests/cmdline/svnadmin_tests.py | dump_include | auycro/subversion | python | def dump_include(sbox):
sbox.build(create_wc=False, empty=True)
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'mkdir', (sbox.repo_url + '/A'), (sbox.repo_url + '/B'), (sbox.repo_url + '/C'), '-m', 'Create folder.')
(_, dump, _) = svntest.actions.run_and_verify_svnadmin(None, [], 'dump', '-q', '--include', '/A', '--include', '/C', sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
expected_output = svntest.verify.RegexListOutput(['-+\\n', 'r1\\ .*\n', re.escape('Changed paths:\n'), re.escape(' A /A\n'), re.escape(' A /C\n'), '-+\\n'])
svntest.actions.run_and_verify_svn(expected_output, [], 'log', '-v', '-q', sbox2.repo_url) |
def dump_not_include_copysource(sbox):
'svnadmin dump with not included copysource'
sbox.build(create_wc=False, empty=True)
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'mkdir', (sbox.repo_url + '/trunk'), (sbox.repo_url + '/branches'), (sbox.repo_url + '/tags'), '-m', 'Create repository structure.')
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'copy', (sbox.repo_url + '/trunk'), (sbox.repo_url + '/branches/branch1'), '-m', 'Create branch.')
(_, dump, _) = svntest.actions.run_and_verify_svnadmin(None, [], 'dump', '-q', '--include', '/branches', sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
expected_output = svntest.verify.RegexListOutput(['-+\\n', 'r2\\ .*\n', re.escape('Changed paths:\n'), re.escape(' A /branches/branch1\n'), '-+\\n', 'r1\\ .*\n', re.escape('Changed paths:\n'), re.escape(' A /branches\n'), '-+\\n'])
svntest.actions.run_and_verify_svn(expected_output, [], 'log', '-v', '-q', sbox2.repo_url) | 1,605,245,060,668,169,700 | svnadmin dump with not included copysource | subversion/tests/cmdline/svnadmin_tests.py | dump_not_include_copysource | auycro/subversion | python | def dump_not_include_copysource(sbox):
sbox.build(create_wc=False, empty=True)
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'mkdir', (sbox.repo_url + '/trunk'), (sbox.repo_url + '/branches'), (sbox.repo_url + '/tags'), '-m', 'Create repository structure.')
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'copy', (sbox.repo_url + '/trunk'), (sbox.repo_url + '/branches/branch1'), '-m', 'Create branch.')
(_, dump, _) = svntest.actions.run_and_verify_svnadmin(None, [], 'dump', '-q', '--include', '/branches', sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
expected_output = svntest.verify.RegexListOutput(['-+\\n', 'r2\\ .*\n', re.escape('Changed paths:\n'), re.escape(' A /branches/branch1\n'), '-+\\n', 'r1\\ .*\n', re.escape('Changed paths:\n'), re.escape(' A /branches\n'), '-+\\n'])
svntest.actions.run_and_verify_svn(expected_output, [], 'log', '-v', '-q', sbox2.repo_url) |
def dump_exclude_by_pattern(sbox):
'svnadmin dump with paths excluded by pattern'
sbox.build(create_wc=False, empty=True)
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'mkdir', (sbox.repo_url + '/aaa'), (sbox.repo_url + '/aab'), (sbox.repo_url + '/aac'), (sbox.repo_url + '/bbc'), '-m', 'Create repository structure.')
(_, dump, _) = svntest.actions.run_and_verify_svnadmin(None, [], 'dump', '-q', '--exclude', '/aa?', '--pattern', sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
expected_output = svntest.verify.RegexListOutput(['-+\\n', 'r1\\ .*\n', re.escape('Changed paths:\n'), re.escape(' A /bbc\n'), '-+\\n'])
svntest.actions.run_and_verify_svn(expected_output, [], 'log', '-v', '-q', sbox2.repo_url) | 2,939,393,347,528,312,300 | svnadmin dump with paths excluded by pattern | subversion/tests/cmdline/svnadmin_tests.py | dump_exclude_by_pattern | auycro/subversion | python | def dump_exclude_by_pattern(sbox):
sbox.build(create_wc=False, empty=True)
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'mkdir', (sbox.repo_url + '/aaa'), (sbox.repo_url + '/aab'), (sbox.repo_url + '/aac'), (sbox.repo_url + '/bbc'), '-m', 'Create repository structure.')
(_, dump, _) = svntest.actions.run_and_verify_svnadmin(None, [], 'dump', '-q', '--exclude', '/aa?', '--pattern', sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
expected_output = svntest.verify.RegexListOutput(['-+\\n', 'r1\\ .*\n', re.escape('Changed paths:\n'), re.escape(' A /bbc\n'), '-+\\n'])
svntest.actions.run_and_verify_svn(expected_output, [], 'log', '-v', '-q', sbox2.repo_url) |
def dump_include_by_pattern(sbox):
'svnadmin dump with paths included by pattern'
sbox.build(create_wc=False, empty=True)
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'mkdir', (sbox.repo_url + '/aaa'), (sbox.repo_url + '/aab'), (sbox.repo_url + '/aac'), (sbox.repo_url + '/bbc'), '-m', 'Create repository structure.')
(_, dump, _) = svntest.actions.run_and_verify_svnadmin(None, [], 'dump', '-q', '--include', '/aa?', '--pattern', sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
expected_output = svntest.verify.RegexListOutput(['-+\\n', 'r1\\ .*\n', re.escape('Changed paths:\n'), re.escape(' A /aaa\n'), re.escape(' A /aab\n'), re.escape(' A /aac\n'), '-+\\n'])
svntest.actions.run_and_verify_svn(expected_output, [], 'log', '-v', '-q', sbox2.repo_url) | 1,634,369,048,131,271,000 | svnadmin dump with paths included by pattern | subversion/tests/cmdline/svnadmin_tests.py | dump_include_by_pattern | auycro/subversion | python | def dump_include_by_pattern(sbox):
sbox.build(create_wc=False, empty=True)
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'mkdir', (sbox.repo_url + '/aaa'), (sbox.repo_url + '/aab'), (sbox.repo_url + '/aac'), (sbox.repo_url + '/bbc'), '-m', 'Create repository structure.')
(_, dump, _) = svntest.actions.run_and_verify_svnadmin(None, [], 'dump', '-q', '--include', '/aa?', '--pattern', sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
expected_output = svntest.verify.RegexListOutput(['-+\\n', 'r1\\ .*\n', re.escape('Changed paths:\n'), re.escape(' A /aaa\n'), re.escape(' A /aab\n'), re.escape(' A /aac\n'), '-+\\n'])
svntest.actions.run_and_verify_svn(expected_output, [], 'log', '-v', '-q', sbox2.repo_url) |
def dump_exclude_all_rev_changes(sbox):
'svnadmin dump with all revision changes excluded'
sbox.build(create_wc=False, empty=True)
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'mkdir', (sbox.repo_url + '/r1a'), (sbox.repo_url + '/r1b'), (sbox.repo_url + '/r1c'), '-m', 'Revision 1.')
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'mkdir', (sbox.repo_url + '/r2a'), (sbox.repo_url + '/r2b'), (sbox.repo_url + '/r2c'), '-m', 'Revision 2.')
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'mkdir', (sbox.repo_url + '/r3a'), (sbox.repo_url + '/r3b'), (sbox.repo_url + '/r3c'), '-m', 'Revision 3.')
(_, dump, _) = svntest.actions.run_and_verify_svnadmin(None, [], 'dump', '-q', '--exclude', '/r2?', '--pattern', sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
expected_output = svntest.verify.RegexListOutput(['-+\\n', 'r3 | jrandom | .* | 1 line\\n', re.escape('Changed paths:'), re.escape(' A /r3a'), re.escape(' A /r3b'), re.escape(' A /r3c'), '', re.escape('Revision 3.'), '-+\\n', re.escape('r2 | (no author) | (no date) | 1 line'), '', '', '-+\\n', 'r1 | jrandom | .* | 1 line\\n', re.escape('Changed paths:'), re.escape(' A /r1a'), re.escape(' A /r1b'), re.escape(' A /r1c'), '', re.escape('Revision 1.'), '-+\\n'])
svntest.actions.run_and_verify_svn(expected_output, [], 'log', '-v', sbox2.repo_url) | 880,426,043,000,715,300 | svnadmin dump with all revision changes excluded | subversion/tests/cmdline/svnadmin_tests.py | dump_exclude_all_rev_changes | auycro/subversion | python | def dump_exclude_all_rev_changes(sbox):
sbox.build(create_wc=False, empty=True)
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'mkdir', (sbox.repo_url + '/r1a'), (sbox.repo_url + '/r1b'), (sbox.repo_url + '/r1c'), '-m', 'Revision 1.')
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'mkdir', (sbox.repo_url + '/r2a'), (sbox.repo_url + '/r2b'), (sbox.repo_url + '/r2c'), '-m', 'Revision 2.')
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'mkdir', (sbox.repo_url + '/r3a'), (sbox.repo_url + '/r3b'), (sbox.repo_url + '/r3c'), '-m', 'Revision 3.')
(_, dump, _) = svntest.actions.run_and_verify_svnadmin(None, [], 'dump', '-q', '--exclude', '/r2?', '--pattern', sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
expected_output = svntest.verify.RegexListOutput(['-+\\n', 'r3 | jrandom | .* | 1 line\\n', re.escape('Changed paths:'), re.escape(' A /r3a'), re.escape(' A /r3b'), re.escape(' A /r3c'), '', re.escape('Revision 3.'), '-+\\n', re.escape('r2 | (no author) | (no date) | 1 line'), '', '', '-+\\n', 'r1 | jrandom | .* | 1 line\\n', re.escape('Changed paths:'), re.escape(' A /r1a'), re.escape(' A /r1b'), re.escape(' A /r1c'), '', re.escape('Revision 1.'), '-+\\n'])
svntest.actions.run_and_verify_svn(expected_output, [], 'log', '-v', sbox2.repo_url) |
def dump_invalid_filtering_option(sbox):
'dump with --include and --exclude simultaneously'
sbox.build(create_wc=False, empty=False)
expected_error = ".*: '--exclude' and '--include' options cannot be used simultaneously"
svntest.actions.run_and_verify_svnadmin(None, expected_error, 'dump', '-q', '--exclude', '/A/D/H', '--include', '/A/B/E', sbox.repo_dir) | 326,157,909,571,971,500 | dump with --include and --exclude simultaneously | subversion/tests/cmdline/svnadmin_tests.py | dump_invalid_filtering_option | auycro/subversion | python | def dump_invalid_filtering_option(sbox):
sbox.build(create_wc=False, empty=False)
expected_error = ".*: '--exclude' and '--include' options cannot be used simultaneously"
svntest.actions.run_and_verify_svnadmin(None, expected_error, 'dump', '-q', '--exclude', '/A/D/H', '--include', '/A/B/E', sbox.repo_dir) |
@Issue(4725)
def load_issue4725(sbox):
'load that triggers issue 4725'
sbox.build(empty=True)
sbox.simple_mkdir('subversion')
sbox.simple_commit()
sbox.simple_mkdir('subversion/trunk')
sbox.simple_mkdir('subversion/branches')
sbox.simple_commit()
sbox.simple_mkdir('subversion/trunk/src')
sbox.simple_commit()
(_, dump, _) = svntest.actions.run_and_verify_svnadmin(None, [], 'dump', '-q', sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump, '-M100') | -1,960,894,554,059,477,200 | load that triggers issue 4725 | subversion/tests/cmdline/svnadmin_tests.py | load_issue4725 | auycro/subversion | python | @Issue(4725)
def load_issue4725(sbox):
sbox.build(empty=True)
sbox.simple_mkdir('subversion')
sbox.simple_commit()
sbox.simple_mkdir('subversion/trunk')
sbox.simple_mkdir('subversion/branches')
sbox.simple_commit()
sbox.simple_mkdir('subversion/trunk/src')
sbox.simple_commit()
(_, dump, _) = svntest.actions.run_and_verify_svnadmin(None, [], 'dump', '-q', sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump, '-M100') |
@Issue(4767)
def dump_no_canonicalize_svndate(sbox):
"svnadmin dump shouldn't canonicalize svn:date"
sbox.build(create_wc=False, empty=True)
svntest.actions.enable_revprop_changes(sbox.repo_dir)
propval = '2015-01-01T00:00:00.0Z'
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'propset', '--revprop', '-r0', 'svn:date', propval, sbox.repo_url)
dump_lines = svntest.actions.run_and_verify_dump(sbox.repo_dir)
assert ((propval.encode() + b'\n') in dump_lines) | 45,566,800,534,895,640 | svnadmin dump shouldn't canonicalize svn:date | subversion/tests/cmdline/svnadmin_tests.py | dump_no_canonicalize_svndate | auycro/subversion | python | @Issue(4767)
def dump_no_canonicalize_svndate(sbox):
sbox.build(create_wc=False, empty=True)
svntest.actions.enable_revprop_changes(sbox.repo_dir)
propval = '2015-01-01T00:00:00.0Z'
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'propset', '--revprop', '-r0', 'svn:date', propval, sbox.repo_url)
dump_lines = svntest.actions.run_and_verify_dump(sbox.repo_dir)
assert ((propval.encode() + b'\n') in dump_lines) |
def check_recover_prunes_rep_cache(sbox, enable_rep_sharing):
"Check 'recover' prunes the rep-cache while enable-rep-sharing is\n true/false.\n "
rep_cache_r1 = read_rep_cache(sbox.repo_dir)
sbox.simple_append('iota', 'New line.\n')
sbox.simple_commit()
rep_cache_r2 = read_rep_cache(sbox.repo_dir)
if (not (len(rep_cache_r2) == (len(rep_cache_r1) + 1))):
raise svntest.Failure
fsfs_conf = svntest.main.get_fsfs_conf_file_path(sbox.repo_dir)
svntest.main.file_append(fsfs_conf, ('\n[rep-sharing]\nenable-rep-sharing = %s\n' % (('true' if enable_rep_sharing else 'false'),)))
head_rev_path = fsfs_file(sbox.repo_dir, 'revs', '2')
os.remove(head_rev_path)
current_path = os.path.join(sbox.repo_dir, 'db', 'current')
svntest.main.file_write(current_path, '1\n')
svntest.actions.run_and_verify_svnadmin(None, [], 'recover', sbox.repo_dir)
svntest.actions.run_and_verify_svnlook(['1\n'], [], 'youngest', sbox.repo_dir)
rep_cache_recovered = read_rep_cache(sbox.repo_dir)
if (not (rep_cache_recovered == rep_cache_r1)):
raise svntest.Failure | 1,701,348,569,688,906,800 | Check 'recover' prunes the rep-cache while enable-rep-sharing is
true/false. | subversion/tests/cmdline/svnadmin_tests.py | check_recover_prunes_rep_cache | auycro/subversion | python | def check_recover_prunes_rep_cache(sbox, enable_rep_sharing):
"Check 'recover' prunes the rep-cache while enable-rep-sharing is\n true/false.\n "
rep_cache_r1 = read_rep_cache(sbox.repo_dir)
sbox.simple_append('iota', 'New line.\n')
sbox.simple_commit()
rep_cache_r2 = read_rep_cache(sbox.repo_dir)
if (not (len(rep_cache_r2) == (len(rep_cache_r1) + 1))):
raise svntest.Failure
fsfs_conf = svntest.main.get_fsfs_conf_file_path(sbox.repo_dir)
svntest.main.file_append(fsfs_conf, ('\n[rep-sharing]\nenable-rep-sharing = %s\n' % (('true' if enable_rep_sharing else 'false'),)))
head_rev_path = fsfs_file(sbox.repo_dir, 'revs', '2')
os.remove(head_rev_path)
current_path = os.path.join(sbox.repo_dir, 'db', 'current')
svntest.main.file_write(current_path, '1\n')
svntest.actions.run_and_verify_svnadmin(None, [], 'recover', sbox.repo_dir)
svntest.actions.run_and_verify_svnlook(['1\n'], [], 'youngest', sbox.repo_dir)
rep_cache_recovered = read_rep_cache(sbox.repo_dir)
if (not (rep_cache_recovered == rep_cache_r1)):
raise svntest.Failure |
@Issue(4077)
@SkipUnless(svntest.main.is_fs_type_fsfs)
@SkipUnless(svntest.main.python_sqlite_can_read_without_rowid)
def recover_prunes_rep_cache_when_enabled(sbox):
'recover prunes rep cache when enabled'
sbox.build()
check_recover_prunes_rep_cache(sbox, enable_rep_sharing=True) | -6,380,502,636,390,315,000 | recover prunes rep cache when enabled | subversion/tests/cmdline/svnadmin_tests.py | recover_prunes_rep_cache_when_enabled | auycro/subversion | python | @Issue(4077)
@SkipUnless(svntest.main.is_fs_type_fsfs)
@SkipUnless(svntest.main.python_sqlite_can_read_without_rowid)
def recover_prunes_rep_cache_when_enabled(sbox):
sbox.build()
check_recover_prunes_rep_cache(sbox, enable_rep_sharing=True) |
@Issue(4077)
@SkipUnless(svntest.main.is_fs_type_fsfs)
@SkipUnless(svntest.main.python_sqlite_can_read_without_rowid)
def recover_prunes_rep_cache_when_disabled(sbox):
'recover prunes rep cache when disabled'
sbox.build()
check_recover_prunes_rep_cache(sbox, enable_rep_sharing=False) | -6,677,523,881,005,802,000 | recover prunes rep cache when disabled | subversion/tests/cmdline/svnadmin_tests.py | recover_prunes_rep_cache_when_disabled | auycro/subversion | python | @Issue(4077)
@SkipUnless(svntest.main.is_fs_type_fsfs)
@SkipUnless(svntest.main.python_sqlite_can_read_without_rowid)
def recover_prunes_rep_cache_when_disabled(sbox):
sbox.build()
check_recover_prunes_rep_cache(sbox, enable_rep_sharing=False) |
@Issue(4760)
def dump_include_copied_directory(sbox):
'include copied directory with nested nodes'
sbox.build(create_wc=False)
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'copy', (sbox.repo_url + '/A/D'), (sbox.repo_url + '/COPY'), '-m', 'Create branch.')
(_, dump, _) = svntest.actions.run_and_verify_svnadmin(None, [], 'dump', '-q', '--include', '/COPY', sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
expected_output = svntest.verify.RegexListOutput(['-+\\n', 'r2\\ .*\n', re.escape('Changed paths:\n'), re.escape(' A /COPY'), re.escape(' A /COPY/G'), re.escape(' A /COPY/G/pi'), re.escape(' A /COPY/G/rho'), re.escape(' A /COPY/G/tau'), re.escape(' A /COPY/H'), re.escape(' A /COPY/H/chi'), re.escape(' A /COPY/H/omega'), re.escape(' A /COPY/H/psi'), re.escape(' A /COPY/gamma'), '-+\\n', 'r1\\ .*\n', '-+\\n'])
svntest.actions.run_and_verify_svn(expected_output, [], 'log', '-v', '-q', sbox2.repo_url) | 450,583,208,109,373,600 | include copied directory with nested nodes | subversion/tests/cmdline/svnadmin_tests.py | dump_include_copied_directory | auycro/subversion | python | @Issue(4760)
def dump_include_copied_directory(sbox):
sbox.build(create_wc=False)
svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'copy', (sbox.repo_url + '/A/D'), (sbox.repo_url + '/COPY'), '-m', 'Create branch.')
(_, dump, _) = svntest.actions.run_and_verify_svnadmin(None, [], 'dump', '-q', '--include', '/COPY', sbox.repo_dir)
sbox2 = sbox.clone_dependent()
sbox2.build(create_wc=False, empty=True)
load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
expected_output = svntest.verify.RegexListOutput(['-+\\n', 'r2\\ .*\n', re.escape('Changed paths:\n'), re.escape(' A /COPY'), re.escape(' A /COPY/G'), re.escape(' A /COPY/G/pi'), re.escape(' A /COPY/G/rho'), re.escape(' A /COPY/G/tau'), re.escape(' A /COPY/H'), re.escape(' A /COPY/H/chi'), re.escape(' A /COPY/H/omega'), re.escape(' A /COPY/H/psi'), re.escape(' A /COPY/gamma'), '-+\\n', 'r1\\ .*\n', '-+\\n'])
svntest.actions.run_and_verify_svn(expected_output, [], 'log', '-v', '-q', sbox2.repo_url) |
def load_normalize_node_props(sbox):
'svnadmin load --normalize node props'
dump_str = b'SVN-fs-dump-format-version: 2\n\nUUID: dc40867b-38f6-0310-9f5f-f81aa277e06f\n\nRevision-number: 0\nProp-content-length: 56\nContent-length: 56\n\nK 8\nsvn:date\nV 27\n2005-05-03T19:09:41.129900Z\nPROPS-END\n\nRevision-number: 1\nProp-content-length: 99\nContent-length: 99\n\nK 7\nsvn:log\nV 0\n\nK 10\nsvn:author\nV 2\npl\nK 8\nsvn:date\nV 27\n2005-05-03T19:10:19.975578Z\nPROPS-END\n\nNode-path: \nNode-kind: dir\nNode-action: change\nProp-content-length: 32\nContent-length: 32\n\nK 10\nsvn:ignore\nV 3\n\n\r\n\nPROPS-END\n\n\n'
sbox.build(empty=True)
exp_err = svntest.verify.RegexListOutput(['svnadmin: E125005:.*', 'svnadmin: E125017:.*'], match_all=False)
load_and_verify_dumpstream(sbox, [], exp_err, dumpfile_revisions, False, dump_str, '--ignore-uuid')
svntest.actions.load_repo(sbox, dump_str=dump_str, bypass_prop_validation=False, normalize_props=True)
(exit_code, output, _) = svntest.main.run_svn(None, 'pg', 'svn:ignore', '--no-newline', sbox.repo_url)
svntest.verify.verify_exit_code(None, exit_code, 0)
if (output != ['\n', '\n']):
raise svntest.Failure(('Unexpected property value %s' % output)) | -5,918,447,927,095,142,000 | svnadmin load --normalize node props | subversion/tests/cmdline/svnadmin_tests.py | load_normalize_node_props | auycro/subversion | python | def load_normalize_node_props(sbox):
dump_str = b'SVN-fs-dump-format-version: 2\n\nUUID: dc40867b-38f6-0310-9f5f-f81aa277e06f\n\nRevision-number: 0\nProp-content-length: 56\nContent-length: 56\n\nK 8\nsvn:date\nV 27\n2005-05-03T19:09:41.129900Z\nPROPS-END\n\nRevision-number: 1\nProp-content-length: 99\nContent-length: 99\n\nK 7\nsvn:log\nV 0\n\nK 10\nsvn:author\nV 2\npl\nK 8\nsvn:date\nV 27\n2005-05-03T19:10:19.975578Z\nPROPS-END\n\nNode-path: \nNode-kind: dir\nNode-action: change\nProp-content-length: 32\nContent-length: 32\n\nK 10\nsvn:ignore\nV 3\n\n\r\n\nPROPS-END\n\n\n'
sbox.build(empty=True)
exp_err = svntest.verify.RegexListOutput(['svnadmin: E125005:.*', 'svnadmin: E125017:.*'], match_all=False)
load_and_verify_dumpstream(sbox, [], exp_err, dumpfile_revisions, False, dump_str, '--ignore-uuid')
svntest.actions.load_repo(sbox, dump_str=dump_str, bypass_prop_validation=False, normalize_props=True)
(exit_code, output, _) = svntest.main.run_svn(None, 'pg', 'svn:ignore', '--no-newline', sbox.repo_url)
svntest.verify.verify_exit_code(None, exit_code, 0)
if (output != ['\n', '\n']):
raise svntest.Failure(('Unexpected property value %s' % output)) |
def _read(self):
' Read P2L index using svnfsfs. '
(exit_code, output, errput) = svntest.main.run_svnfsfs('dump-index', ('-r' + str(self.revision)), self.repo_dir)
svntest.verify.verify_outputs('Error while dumping index', [], errput, [], [])
svntest.verify.verify_exit_code(None, exit_code, 0)
self.by_item.clear()
for line in output:
values = line.split()
if ((len(values) >= 4) and (values[0] != 'Start')):
item = int(values[4])
self.by_item[item] = values | 2,798,168,454,450,458,000 | Read P2L index using svnfsfs. | subversion/tests/cmdline/svnadmin_tests.py | _read | auycro/subversion | python | def _read(self):
' '
(exit_code, output, errput) = svntest.main.run_svnfsfs('dump-index', ('-r' + str(self.revision)), self.repo_dir)
svntest.verify.verify_outputs('Error while dumping index', [], errput, [], [])
svntest.verify.verify_exit_code(None, exit_code, 0)
self.by_item.clear()
for line in output:
values = line.split()
if ((len(values) >= 4) and (values[0] != 'Start')):
item = int(values[4])
self.by_item[item] = values |
def _write(self):
' Rewrite indexes using svnfsfs. '
by_offset = {}
for key in self.by_item:
values = self.by_item[key]
by_offset[int(values[0], 16)] = values
lines = []
for (offset, values) in sorted(by_offset.items()):
values = by_offset[offset]
line = (((((((((values[0] + ' ') + values[1]) + ' ') + values[2]) + ' ') + values[3]) + ' ') + values[4]) + '\n')
lines.append(line.encode())
(exit_code, output, errput) = svntest.main.run_command_stdin(svntest.main.svnfsfs_binary, 0, 0, False, lines, 'load-index', self.repo_dir)
svntest.verify.verify_outputs('Error while rewriting index', output, errput, [], [])
svntest.verify.verify_exit_code(None, exit_code, 0) | 463,800,620,164,410,800 | Rewrite indexes using svnfsfs. | subversion/tests/cmdline/svnadmin_tests.py | _write | auycro/subversion | python | def _write(self):
' '
by_offset = {}
for key in self.by_item:
values = self.by_item[key]
by_offset[int(values[0], 16)] = values
lines = []
for (offset, values) in sorted(by_offset.items()):
values = by_offset[offset]
line = (((((((((values[0] + ' ') + values[1]) + ' ') + values[2]) + ' ') + values[3]) + ' ') + values[4]) + '\n')
lines.append(line.encode())
(exit_code, output, errput) = svntest.main.run_command_stdin(svntest.main.svnfsfs_binary, 0, 0, False, lines, 'load-index', self.repo_dir)
svntest.verify.verify_outputs('Error while rewriting index', output, errput, [], [])
svntest.verify.verify_exit_code(None, exit_code, 0) |
def get_item(self, item):
' Return offset, length and type of ITEM. '
values = self.by_item[item]
offset = int(values[0], 16)
len = int(values[1], 16)
type = values[2]
return (offset, len, type) | -2,074,339,648,030,979,800 | Return offset, length and type of ITEM. | subversion/tests/cmdline/svnadmin_tests.py | get_item | auycro/subversion | python | def get_item(self, item):
' '
values = self.by_item[item]
offset = int(values[0], 16)
len = int(values[1], 16)
type = values[2]
return (offset, len, type) |
def modify_item(self, item, offset, len):
' Modify offset and length of ITEM. '
values = self.by_item[item]
values[0] = ('%x' % offset)
values[1] = ('%x' % len)
self._write() | -2,660,641,024,189,489,700 | Modify offset and length of ITEM. | subversion/tests/cmdline/svnadmin_tests.py | modify_item | auycro/subversion | python | def modify_item(self, item, offset, len):
' '
values = self.by_item[item]
values[0] = ('%x' % offset)
values[1] = ('%x' % len)
self._write() |
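Taken together, the helpers above (_read, _write, get_item and modify_item) wrap 'svnfsfs dump-index' and 'svnfsfs load-index' so a test can read a revision's P2L index, look up one entry and rewrite it with altered values. The enclosing class definition is not shown in this excerpt, so the sketch below uses a hypothetical wrapper name, P2LIndex, whose constructor is assumed to store repo_dir and revision and to call _read():

# Usage sketch only; P2LIndex is a hypothetical stand-in for the enclosing helper class.
index = P2LIndex(sbox.repo_dir, 1)            # parse the P2L index of r1 via 'svnfsfs dump-index'
(offset, length, kind) = index.get_item(3)    # offset, length and type recorded for item 3
index.modify_item(3, offset, length + 16)     # overstate the length and rewrite via 'svnfsfs load-index'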
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(impalad_args='--enable_extended_memory_metrics=true')
def test_varz_hidden_variables(self):
'Tests that modified hidden variables show up in /varz'
response = requests.get('http://localhost:25000/varz?json')
assert (response.status_code == requests.codes.ok)
varz_json = json.loads(response.text)
flag = [e for e in varz_json['flags'] if (e['name'] == 'enable_extended_memory_metrics')]
assert (len(flag) == 1)
assert (flag[0]['default'] == 'false')
assert (flag[0]['current'] == 'true')
assert flag[0]['experimental'] | 7,393,242,295,967,590,000 | Tests that modified hidden variables show up in /varz | tests/custom_cluster/test_web_pages.py | test_varz_hidden_variables | AlexanderSaydakov/impala | python | @pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(impalad_args='--enable_extended_memory_metrics=true')
def test_varz_hidden_variables(self):
response = requests.get('http://localhost:25000/varz?json')
assert (response.status_code == requests.codes.ok)
varz_json = json.loads(response.text)
flag = [e for e in varz_json['flags'] if (e['name'] == 'enable_extended_memory_metrics')]
assert (len(flag) == 1)
assert (flag[0]['default'] == 'false')
assert (flag[0]['current'] == 'true')
assert flag[0]['experimental'] |
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(impalad_args='--webserver_max_post_length_bytes=100')
def test_max_post_length(self):
'Tests the maximum length of a POST request that will be accepted'
too_big_post_content = ('c' * 10000)
response = requests.post('http://localhost:25000/', too_big_post_content)
assert (response.status_code == requests.codes.request_entity_too_large)
ok_post_content = ('c' * 100)
response = requests.post('http://localhost:25000/', ok_post_content)
assert (response.status_code == requests.codes.ok) | 5,251,090,867,718,838,000 | Tests the maximum length of a POST request that will be accepted | tests/custom_cluster/test_web_pages.py | test_max_post_length | AlexanderSaydakov/impala | python | @pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(impalad_args='--webserver_max_post_length_bytes=100')
def test_max_post_length(self):
too_big_post_content = ('c' * 10000)
response = requests.post('http://localhost:25000/', too_big_post_content)
assert (response.status_code == requests.codes.request_entity_too_large)
ok_post_content = ('c' * 100)
response = requests.post('http://localhost:25000/', ok_post_content)
assert (response.status_code == requests.codes.ok) |
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(impalad_args='--query_stmt_size=0')
def test_query_stmt_without_truncate(self):
'Check if the full query string is displayed in the query list on the WebUI.'
query_select = ('x ' * 450)
query = 'select "{0}"'.format(query_select)
expected = 'select \\"{0}\\"'.format(query_select)
self.execute_query(query)
response = requests.get('http://localhost:25000/queries?json')
response_json = response.text
assert (expected in response_json), 'No matching statement found in the queries site.' | 2,366,411,541,843,872,300 | Check if the full query string is displayed in the query list on the WebUI. | tests/custom_cluster/test_web_pages.py | test_query_stmt_without_truncate | AlexanderSaydakov/impala | python | @pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(impalad_args='--query_stmt_size=0')
def test_query_stmt_without_truncate(self):
query_select = ('x ' * 450)
query = 'select "{0}"'.format(query_select)
expected = 'select \\"{0}\\"'.format(query_select)
self.execute_query(query)
response = requests.get('http://localhost:25000/queries?json')
response_json = response.text
assert (expected in response_json), 'No matching statement found in the queries site.' |
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(impalad_args='--query_stmt_size=10')
def test_query_stmt_with_custom_length(self):
'Check if the partial query with the correct length is displayed in the query list\n on the WebUI.'
query = 'select "{0}"'.format(('x ' * 450))
expected = 'select \\"x ...'
self.execute_query(query)
response = requests.get('http://localhost:25000/queries?json')
response_json = response.text
assert (expected in response_json), 'No matching statement found in the queries site.' | 573,251,272,263,913,500 | Check if the partial query with the correct length is displayed in the query list
on the WebUI. | tests/custom_cluster/test_web_pages.py | test_query_stmt_with_custom_length | AlexanderSaydakov/impala | python | @pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(impalad_args='--query_stmt_size=10')
def test_query_stmt_with_custom_length(self):
'Check if the partial query with the correct length is displayed in the query list\n on the WebUI.'
query = 'select "{0}"'.format(('x ' * 450))
expected = 'select \\"x ...'
self.execute_query(query)
response = requests.get('http://localhost:25000/queries?json')
response_json = response.text
assert (expected in response_json), 'No matching statement found in the queries site.' |
def goal_conditions_for_demo(demo: Demonstration, behaviors: Any) -> List[str]:
'\n Infer the goal conditions of a single demonstration.\n\n Args\n ----\n demo: the demonstration to infer the goal of.\n behavior: check the behavior to remove conflicting conditions.\n\n Returns\n -------\n goals: list of the goals inferred in the demonstration.\n\n '
goals = []
for i in range((len(demo) - 1), (- 1), (- 1)):
for condition in demo[i].postconditions():
if ((condition not in goals) and (not contains_conflicting(behaviors, goals, condition))):
goals.append(condition)
goals.reverse()
return goals | -8,493,976,639,920,056,000 | Infer the goal conditions of a single demonstration.
Args
----
demo: the demonstration to infer the goal of.
behavior: check the behavior to remove conflicting conditions.
Returns
-------
goals: list of the goals inferred in the demonstration. | bt_learning/bt_learning/learning_from_demo/goal_identification.py | goal_conditions_for_demo | matiov/disambiguate-BT-execution | python | def goal_conditions_for_demo(demo: Demonstration, behaviors: Any) -> List[str]:
'\n Infer the goal conditions of a single demonstration.\n\n Args\n ----\n demo: the demonstration to infer the goal of.\n behavior: check the behavior to remove conflicting conditions.\n\n Returns\n -------\n goals: list of the goals inferred in the demonstration.\n\n '
goals = []
for i in range((len(demo) - 1), (- 1), (- 1)):
for condition in demo[i].postconditions():
if ((condition not in goals) and (not contains_conflicting(behaviors, goals, condition))):
goals.append(condition)
goals.reverse()
return goals |
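A brief usage sketch for goal_conditions_for_demo; the demo and behaviors objects are assumed inputs from the surrounding bt_learning and robot_behaviors packages and are not defined in this excerpt:

# Sketch only: 'demo' is a Demonstration, 'behaviors' the robot behavior definitions.
goals = goal_conditions_for_demo(demo, behaviors)
# The demonstration is scanned from the last action backwards, so postconditions of
# later actions take priority and earlier ones are kept only if they do not conflict.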
def goal_tree(goals: List[str], behaviors: Any, world_interface: Any) -> BehaviourTree:
'\n Construct a Behavior Tree starting from the goals.\n\n Args\n ----\n goals: list of all goals inferred from the demonstration.\n behaviors: behavior in the demonstration, as defined in robot_behaviors package.\n world_interface: interface to the robot.\n\n Returns\n -------\n tree: a Behavior Tree of goal conditions.\n\n '
tree = RSequence()
for goal in goals:
(node, _) = behaviors.get_node_from_string(goal, world_interface, None)
tree.add_child(node)
return tree | 6,080,646,337,530,809,000 | Construct a Behavior Tree starting from the goals.
Args
----
goals: list of all goals inferred from the demonstration.
behaviors: behavior in the demonstration, as defined in robot_behaviors package.
world_interface: interface to the robot.
Returns
-------
tree: a Behavior Tree of goal conditions. | bt_learning/bt_learning/learning_from_demo/goal_identification.py | goal_tree | matiov/disambiguate-BT-execution | python | def goal_tree(goals: List[str], behaviors: Any, world_interface: Any) -> BehaviourTree:
'\n Construct a Behavior Tree starting from the goals.\n\n Args\n ----\n goals: list of all goals inferred from the demonstration.\n behaviors: behavior in the demonstration, as defined in robot_behaviors package.\n world_interface: interface to the robot.\n\n Returns\n -------\n tree: a Behavior Tree of goal conditions.\n\n '
tree = RSequence()
for goal in goals:
(node, _) = behaviors.get_node_from_string(goal, world_interface, None)
tree.add_child(node)
return tree |
def get_minibatch(roidb, num_classes):
'Given a roidb, construct a minibatch sampled from it.'
num_images = len(roidb)
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES), size=num_images)
assert ((cfg.TRAIN.BATCH_SIZE % num_images) == 0), 'num_images ({}) must divide BATCH_SIZE ({})'.format(num_images, cfg.TRAIN.BATCH_SIZE)
(im_blob, im_scales) = _get_image_blob(roidb, random_scale_inds)
blobs = {'data': im_blob}
assert (len(im_scales) == 1), 'Single batch only'
assert (len(roidb) == 1), 'Single batch only'
sep = '/'
clp_file_format = '.npy'
clp_file_store = 'CloudPoints'
img_path = roidb[0]['image']
img_path_arr = img_path.split(sep)
prefix = img_path_arr[:(- 2)]
file_name = (img_path_arr[(- 1)].split('.')[0] + clp_file_format)
clp_path = os.path.join(sep.join(prefix), clp_file_store, file_name)
valid_points = np.load(clp_path)
width_ori = roidb[0]['height']
height_ori = roidb[0]['width']
clp_ori = np.zeros([width_ori, height_ori], dtype=np.float32)
clp_ori[tuple((valid_points.T[1, :], valid_points.T[0, :]))] = 1
clp_reshape = np.empty([width_ori, height_ori, 3], dtype=np.float32)
for i in range(3):
clp_reshape[0:width_ori, 0:height_ori, i] = clp_ori
clp_res = cv2.resize(clp_reshape, None, None, fx=im_scales[0], fy=im_scales[0], interpolation=cv2.INTER_LINEAR)
clp_res = clp_res[:, :, 0]
clp_res[(clp_res > 0)] = 1
width = clp_res.shape[0]
height = clp_res.shape[1]
clp_res = clp_res.reshape([1, width, height, 1])
blobs['clp_info'] = clp_res
if cfg.TRAIN.USE_ALL_GT:
gt_inds = np.where((roidb[0]['gt_classes'] != 0))[0]
else:
gt_inds = np.where((roidb[0]['gt_classes'] != (0 & np.all((roidb[0]['gt_overlaps'].toarray() > (- 1.0)), axis=1))))[0]
gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
gt_boxes[:, 0:4] = (roidb[0]['boxes'][gt_inds, :] * im_scales[0])
gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
blobs['gt_boxes'] = gt_boxes
blobs['im_info'] = np.array([im_blob.shape[1], im_blob.shape[2], im_scales[0]], dtype=np.float32)
return blobs | -3,172,347,574,009,824,000 | Given a roidb, construct a minibatch sampled from it. | lib/roi_data_layer/minibatch.py | get_minibatch | wennieWN/endernewton_tf-faster-rcnn | python | def get_minibatch(roidb, num_classes):
num_images = len(roidb)
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES), size=num_images)
assert ((cfg.TRAIN.BATCH_SIZE % num_images) == 0), 'num_images ({}) must divide BATCH_SIZE ({})'.format(num_images, cfg.TRAIN.BATCH_SIZE)
(im_blob, im_scales) = _get_image_blob(roidb, random_scale_inds)
blobs = {'data': im_blob}
assert (len(im_scales) == 1), 'Single batch only'
assert (len(roidb) == 1), 'Single batch only'
sep = '/'
clp_file_format = '.npy'
clp_file_store = 'CloudPoints'
img_path = roidb[0]['image']
img_path_arr = img_path.split(sep)
prefix = img_path_arr[:(- 2)]
file_name = (img_path_arr[(- 1)].split('.')[0] + clp_file_format)
clp_path = os.path.join(sep.join(prefix), clp_file_store, file_name)
valid_points = np.load(clp_path)
width_ori = roidb[0]['height']
height_ori = roidb[0]['width']
clp_ori = np.zeros([width_ori, height_ori], dtype=np.float32)
clp_ori[tuple((valid_points.T[1, :], valid_points.T[0, :]))] = 1
clp_reshape = np.empty([width_ori, height_ori, 3], dtype=np.float32)
for i in range(3):
clp_reshape[0:width_ori, 0:height_ori, i] = clp_ori
clp_res = cv2.resize(clp_reshape, None, None, fx=im_scales[0], fy=im_scales[0], interpolation=cv2.INTER_LINEAR)
clp_res = clp_res[:, :, 0]
clp_res[(clp_res > 0)] = 1
width = clp_res.shape[0]
height = clp_res.shape[1]
clp_res = clp_res.reshape([1, width, height, 1])
blobs['clp_info'] = clp_res
if cfg.TRAIN.USE_ALL_GT:
gt_inds = np.where((roidb[0]['gt_classes'] != 0))[0]
else:
gt_inds = np.where((roidb[0]['gt_classes'] != (0 & np.all((roidb[0]['gt_overlaps'].toarray() > (- 1.0)), axis=1))))[0]
gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
gt_boxes[:, 0:4] = (roidb[0]['boxes'][gt_inds, :] * im_scales[0])
gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
blobs['gt_boxes'] = gt_boxes
blobs['im_info'] = np.array([im_blob.shape[1], im_blob.shape[2], im_scales[0]], dtype=np.float32)
return blobs |
def _get_image_blob(roidb, scale_inds):
'Builds an input blob from the images in the roidb at the specified\n scales.\n '
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in range(num_images):
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::(- 1), :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
(im, im_scale) = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size, cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
blob = im_list_to_blob(processed_ims)
return (blob, im_scales) | 1,247,642,366,500,196,600 | Builds an input blob from the images in the roidb at the specified
scales. | lib/roi_data_layer/minibatch.py | _get_image_blob | wennieWN/endernewton_tf-faster-rcnn | python | def _get_image_blob(roidb, scale_inds):
'Builds an input blob from the images in the roidb at the specified\n scales.\n '
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in range(num_images):
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::(- 1), :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
(im, im_scale) = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size, cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
blob = im_list_to_blob(processed_ims)
return (blob, im_scales) |
def __init__(self, *args, **kwds):
'\n    Constructor. Any message fields that are implicitly/explicitly\n    set to None will be assigned a default value. The recommended\n    use is keyword arguments as this is more robust to future message\n    changes.  You cannot mix in-order arguments and keyword arguments.\n\n    The available fields are:\n       header,sysid,compid,acc,gyro,mag\n\n    :param args: complete set of field values, in .msg order\n    :param kwds: use keyword arguments corresponding to message field names\n    to set specific fields.\n    '
if (args or kwds):
super(mav_cc16_IMU, self).__init__(*args, **kwds)
if (self.header is None):
self.header = std_msgs.msg.Header()
if (self.sysid is None):
self.sysid = 0
if (self.compid is None):
self.compid = 0
if (self.acc is None):
self.acc = geometry_msgs.msg.Vector3()
if (self.gyro is None):
self.gyro = geometry_msgs.msg.Vector3()
if (self.mag is None):
self.mag = geometry_msgs.msg.Vector3()
else:
self.header = std_msgs.msg.Header()
self.sysid = 0
self.compid = 0
self.acc = geometry_msgs.msg.Vector3()
self.gyro = geometry_msgs.msg.Vector3()
self.mag = geometry_msgs.msg.Vector3() | -5,999,975,263,630,756,000 | Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,sysid,compid,acc,gyro,mag
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields. | Catkin_PKG_Car/devel/lib/python2.7/dist-packages/drive_ros_msgs/msg/_mav_cc16_IMU.py | __init__ | jessecha/OPCAS | python | def __init__(self, *args, **kwds):
'\n    Constructor. Any message fields that are implicitly/explicitly\n    set to None will be assigned a default value. The recommended\n    use is keyword arguments as this is more robust to future message\n    changes.  You cannot mix in-order arguments and keyword arguments.\n\n    The available fields are:\n       header,sysid,compid,acc,gyro,mag\n\n    :param args: complete set of field values, in .msg order\n    :param kwds: use keyword arguments corresponding to message field names\n    to set specific fields.\n    '
if (args or kwds):
super(mav_cc16_IMU, self).__init__(*args, **kwds)
if (self.header is None):
self.header = std_msgs.msg.Header()
if (self.sysid is None):
self.sysid = 0
if (self.compid is None):
self.compid = 0
if (self.acc is None):
self.acc = geometry_msgs.msg.Vector3()
if (self.gyro is None):
self.gyro = geometry_msgs.msg.Vector3()
if (self.mag is None):
self.mag = geometry_msgs.msg.Vector3()
else:
self.header = std_msgs.msg.Header()
self.sysid = 0
self.compid = 0
self.acc = geometry_msgs.msg.Vector3()
self.gyro = geometry_msgs.msg.Vector3()
self.mag = geometry_msgs.msg.Vector3() |
def _get_types(self):
'\n internal API method\n '
return self._slot_types | 840,424,092,067,405,300 | internal API method | Catkin_PKG_Car/devel/lib/python2.7/dist-packages/drive_ros_msgs/msg/_mav_cc16_IMU.py | _get_types | jessecha/OPCAS | python | def _get_types(self):
'\n \n '
return self._slot_types |
def serialize(self, buff):
'\n serialize message into buffer\n :param buff: buffer, ``StringIO``\n '
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if (python3 or (type(_x) == unicode)):
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack(('<I%ss' % length), length, _x))
_x = self
buff.write(_get_struct_2B9d().pack(_x.sysid, _x.compid, _x.acc.x, _x.acc.y, _x.acc.z, _x.gyro.x, _x.gyro.y, _x.gyro.z, _x.mag.x, _x.mag.y, _x.mag.z))
except struct.error as se:
self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))))
except TypeError as te:
self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))) | -2,631,236,140,840,951,000 | serialize message into buffer
:param buff: buffer, ``StringIO`` | Catkin_PKG_Car/devel/lib/python2.7/dist-packages/drive_ros_msgs/msg/_mav_cc16_IMU.py | serialize | jessecha/OPCAS | python | def serialize(self, buff):
'\n serialize message into buffer\n :param buff: buffer, ``StringIO``\n '
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if (python3 or (type(_x) == unicode)):
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack(('<I%ss' % length), length, _x))
_x = self
buff.write(_get_struct_2B9d().pack(_x.sysid, _x.compid, _x.acc.x, _x.acc.y, _x.acc.z, _x.gyro.x, _x.gyro.y, _x.gyro.z, _x.mag.x, _x.mag.y, _x.mag.z))
except struct.error as se:
self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))))
except TypeError as te:
self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))) |
def deserialize(self, str):
'\n unpack serialized message in str into this message instance\n :param str: byte array of serialized message, ``str``\n '
try:
if (self.header is None):
self.header = std_msgs.msg.Header()
if (self.acc is None):
self.acc = geometry_msgs.msg.Vector3()
if (self.gyro is None):
self.gyro = geometry_msgs.msg.Vector3()
if (self.mag is None):
self.mag = geometry_msgs.msg.Vector3()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 74
(_x.sysid, _x.compid, _x.acc.x, _x.acc.y, _x.acc.z, _x.gyro.x, _x.gyro.y, _x.gyro.z, _x.mag.x, _x.mag.y, _x.mag.z) = _get_struct_2B9d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) | 11,232,664,134,381,544 | unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str`` | Catkin_PKG_Car/devel/lib/python2.7/dist-packages/drive_ros_msgs/msg/_mav_cc16_IMU.py | deserialize | jessecha/OPCAS | python | def deserialize(self, str):
'\n unpack serialized message in str into this message instance\n :param str: byte array of serialized message, ``str``\n '
try:
if (self.header is None):
self.header = std_msgs.msg.Header()
if (self.acc is None):
self.acc = geometry_msgs.msg.Vector3()
if (self.gyro is None):
self.gyro = geometry_msgs.msg.Vector3()
if (self.mag is None):
self.mag = geometry_msgs.msg.Vector3()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 74
(_x.sysid, _x.compid, _x.acc.x, _x.acc.y, _x.acc.z, _x.gyro.x, _x.gyro.y, _x.gyro.z, _x.mag.x, _x.mag.y, _x.mag.z) = _get_struct_2B9d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) |
def serialize_numpy(self, buff, numpy):
'\n serialize message with numpy array types into buffer\n :param buff: buffer, ``StringIO``\n :param numpy: numpy python module\n '
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if (python3 or (type(_x) == unicode)):
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack(('<I%ss' % length), length, _x))
_x = self
buff.write(_get_struct_2B9d().pack(_x.sysid, _x.compid, _x.acc.x, _x.acc.y, _x.acc.z, _x.gyro.x, _x.gyro.y, _x.gyro.z, _x.mag.x, _x.mag.y, _x.mag.z))
except struct.error as se:
self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))))
except TypeError as te:
self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))) | -8,567,815,062,247,637,000 | serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module | Catkin_PKG_Car/devel/lib/python2.7/dist-packages/drive_ros_msgs/msg/_mav_cc16_IMU.py | serialize_numpy | jessecha/OPCAS | python | def serialize_numpy(self, buff, numpy):
'\n serialize message with numpy array types into buffer\n :param buff: buffer, ``StringIO``\n :param numpy: numpy python module\n '
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if (python3 or (type(_x) == unicode)):
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack(('<I%ss' % length), length, _x))
_x = self
buff.write(_get_struct_2B9d().pack(_x.sysid, _x.compid, _x.acc.x, _x.acc.y, _x.acc.z, _x.gyro.x, _x.gyro.y, _x.gyro.z, _x.mag.x, _x.mag.y, _x.mag.z))
except struct.error as se:
self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))))
except TypeError as te:
self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))) |
def deserialize_numpy(self, str, numpy):
'\n unpack serialized message in str into this message instance using numpy for array types\n :param str: byte array of serialized message, ``str``\n :param numpy: numpy python module\n '
try:
if (self.header is None):
self.header = std_msgs.msg.Header()
if (self.acc is None):
self.acc = geometry_msgs.msg.Vector3()
if (self.gyro is None):
self.gyro = geometry_msgs.msg.Vector3()
if (self.mag is None):
self.mag = geometry_msgs.msg.Vector3()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 74
(_x.sysid, _x.compid, _x.acc.x, _x.acc.y, _x.acc.z, _x.gyro.x, _x.gyro.y, _x.gyro.z, _x.mag.x, _x.mag.y, _x.mag.z) = _get_struct_2B9d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) | 6,518,810,983,754,949,000 | unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module | Catkin_PKG_Car/devel/lib/python2.7/dist-packages/drive_ros_msgs/msg/_mav_cc16_IMU.py | deserialize_numpy | jessecha/OPCAS | python | def deserialize_numpy(self, str, numpy):
'\n unpack serialized message in str into this message instance using numpy for array types\n :param str: byte array of serialized message, ``str``\n :param numpy: numpy python module\n '
try:
if (self.header is None):
self.header = std_msgs.msg.Header()
if (self.acc is None):
self.acc = geometry_msgs.msg.Vector3()
if (self.gyro is None):
self.gyro = geometry_msgs.msg.Vector3()
if (self.mag is None):
self.mag = geometry_msgs.msg.Vector3()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 74
(_x.sysid, _x.compid, _x.acc.x, _x.acc.y, _x.acc.z, _x.gyro.x, _x.gyro.y, _x.gyro.z, _x.mag.x, _x.mag.y, _x.mag.z) = _get_struct_2B9d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) |
def test_events():
'Tests that expected events are created by MOTAccumulator.update().'
acc = mm.MOTAccumulator()
acc.update([], [1, 2], [], frameid=0)
acc.update([1, 2], [], [], frameid=1)
acc.update([1, 2], [1, 2], [[1, 0.5], [0.3, 1]], frameid=2)
acc.update([1, 2], [1, 2], [[0.2, np.nan], [np.nan, 0.1]], frameid=3)
acc.update([1, 2], [1, 2], [[5, 1], [1, 5]], frameid=4)
acc.update([], [], [], frameid=5)
expect = mm.MOTAccumulator.new_event_dataframe()
expect.loc[(0, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(0, 1), :] = ['RAW', np.nan, 1, np.nan]
expect.loc[(0, 2), :] = ['RAW', np.nan, 2, np.nan]
expect.loc[(0, 3), :] = ['FP', np.nan, 1, np.nan]
expect.loc[(0, 4), :] = ['FP', np.nan, 2, np.nan]
expect.loc[(1, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(1, 1), :] = ['RAW', 1, np.nan, np.nan]
expect.loc[(1, 2), :] = ['RAW', 2, np.nan, np.nan]
expect.loc[(1, 3), :] = ['MISS', 1, np.nan, np.nan]
expect.loc[(1, 4), :] = ['MISS', 2, np.nan, np.nan]
expect.loc[(2, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(2, 1), :] = ['RAW', 1, 1, 1.0]
expect.loc[(2, 2), :] = ['RAW', 1, 2, 0.5]
expect.loc[(2, 3), :] = ['RAW', 2, 1, 0.3]
expect.loc[(2, 4), :] = ['RAW', 2, 2, 1.0]
expect.loc[(2, 5), :] = ['MATCH', 1, 2, 0.5]
expect.loc[(2, 6), :] = ['MATCH', 2, 1, 0.3]
expect.loc[(3, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(3, 1), :] = ['RAW', 1, 1, 0.2]
expect.loc[(3, 2), :] = ['RAW', 2, 2, 0.1]
expect.loc[(3, 3), :] = ['TRANSFER', 1, 1, 0.2]
expect.loc[(3, 4), :] = ['SWITCH', 1, 1, 0.2]
expect.loc[(3, 5), :] = ['TRANSFER', 2, 2, 0.1]
expect.loc[(3, 6), :] = ['SWITCH', 2, 2, 0.1]
expect.loc[(4, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(4, 1), :] = ['RAW', 1, 1, 5.0]
expect.loc[(4, 2), :] = ['RAW', 1, 2, 1.0]
expect.loc[(4, 3), :] = ['RAW', 2, 1, 1.0]
expect.loc[(4, 4), :] = ['RAW', 2, 2, 5.0]
expect.loc[(4, 5), :] = ['MATCH', 1, 1, 5.0]
expect.loc[(4, 6), :] = ['MATCH', 2, 2, 5.0]
expect.loc[(5, 0), :] = ['RAW', np.nan, np.nan, np.nan]
pd.util.testing.assert_frame_equal(acc.events, expect) | 2,827,419,784,747,043,300 | Tests that expected events are created by MOTAccumulator.update(). | motmetrics/tests/test_mot.py | test_events | Borda/py-motmetrics | python | def test_events():
acc = mm.MOTAccumulator()
acc.update([], [1, 2], [], frameid=0)
acc.update([1, 2], [], [], frameid=1)
acc.update([1, 2], [1, 2], [[1, 0.5], [0.3, 1]], frameid=2)
acc.update([1, 2], [1, 2], [[0.2, np.nan], [np.nan, 0.1]], frameid=3)
acc.update([1, 2], [1, 2], [[5, 1], [1, 5]], frameid=4)
acc.update([], [], [], frameid=5)
expect = mm.MOTAccumulator.new_event_dataframe()
expect.loc[(0, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(0, 1), :] = ['RAW', np.nan, 1, np.nan]
expect.loc[(0, 2), :] = ['RAW', np.nan, 2, np.nan]
expect.loc[(0, 3), :] = ['FP', np.nan, 1, np.nan]
expect.loc[(0, 4), :] = ['FP', np.nan, 2, np.nan]
expect.loc[(1, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(1, 1), :] = ['RAW', 1, np.nan, np.nan]
expect.loc[(1, 2), :] = ['RAW', 2, np.nan, np.nan]
expect.loc[(1, 3), :] = ['MISS', 1, np.nan, np.nan]
expect.loc[(1, 4), :] = ['MISS', 2, np.nan, np.nan]
expect.loc[(2, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(2, 1), :] = ['RAW', 1, 1, 1.0]
expect.loc[(2, 2), :] = ['RAW', 1, 2, 0.5]
expect.loc[(2, 3), :] = ['RAW', 2, 1, 0.3]
expect.loc[(2, 4), :] = ['RAW', 2, 2, 1.0]
expect.loc[(2, 5), :] = ['MATCH', 1, 2, 0.5]
expect.loc[(2, 6), :] = ['MATCH', 2, 1, 0.3]
expect.loc[(3, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(3, 1), :] = ['RAW', 1, 1, 0.2]
expect.loc[(3, 2), :] = ['RAW', 2, 2, 0.1]
expect.loc[(3, 3), :] = ['TRANSFER', 1, 1, 0.2]
expect.loc[(3, 4), :] = ['SWITCH', 1, 1, 0.2]
expect.loc[(3, 5), :] = ['TRANSFER', 2, 2, 0.1]
expect.loc[(3, 6), :] = ['SWITCH', 2, 2, 0.1]
expect.loc[(4, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(4, 1), :] = ['RAW', 1, 1, 5.0]
expect.loc[(4, 2), :] = ['RAW', 1, 2, 1.0]
expect.loc[(4, 3), :] = ['RAW', 2, 1, 1.0]
expect.loc[(4, 4), :] = ['RAW', 2, 2, 5.0]
expect.loc[(4, 5), :] = ['MATCH', 1, 1, 5.0]
expect.loc[(4, 6), :] = ['MATCH', 2, 2, 5.0]
expect.loc[(5, 0), :] = ['RAW', np.nan, np.nan, np.nan]
pd.util.testing.assert_frame_equal(acc.events, expect) |
def test_max_switch_time():
'Tests max_switch_time option.'
acc = mm.MOTAccumulator(max_switch_time=1)
acc.update([1, 2], [1, 2], [[1, 0.5], [0.3, 1]], frameid=1)
frameid = acc.update([1, 2], [1, 2], [[0.5, np.nan], [np.nan, 0.5]], frameid=2)
df = acc.events.loc[frameid]
assert (((df.Type == 'SWITCH') | (df.Type == 'RAW')) | (df.Type == 'TRANSFER')).all()
acc = mm.MOTAccumulator(max_switch_time=1)
acc.update([1, 2], [1, 2], [[1, 0.5], [0.3, 1]], frameid=1)
frameid = acc.update([1, 2], [1, 2], [[0.5, np.nan], [np.nan, 0.5]], frameid=5)
df = acc.events.loc[frameid]
assert (((df.Type == 'MATCH') | (df.Type == 'RAW')) | (df.Type == 'TRANSFER')).all() | -84,593,867,561,195,820 | Tests max_switch_time option. | motmetrics/tests/test_mot.py | test_max_switch_time | Borda/py-motmetrics | python | def test_max_switch_time():
acc = mm.MOTAccumulator(max_switch_time=1)
acc.update([1, 2], [1, 2], [[1, 0.5], [0.3, 1]], frameid=1)
frameid = acc.update([1, 2], [1, 2], [[0.5, np.nan], [np.nan, 0.5]], frameid=2)
df = acc.events.loc[frameid]
assert (((df.Type == 'SWITCH') | (df.Type == 'RAW')) | (df.Type == 'TRANSFER')).all()
acc = mm.MOTAccumulator(max_switch_time=1)
acc.update([1, 2], [1, 2], [[1, 0.5], [0.3, 1]], frameid=1)
frameid = acc.update([1, 2], [1, 2], [[0.5, np.nan], [np.nan, 0.5]], frameid=5)
df = acc.events.loc[frameid]
assert (((df.Type == 'MATCH') | (df.Type == 'RAW')) | (df.Type == 'TRANSFER')).all() |
def test_auto_id():
'Tests auto_id option.'
acc = mm.MOTAccumulator(auto_id=True)
acc.update([1, 2, 3, 4], [], [])
acc.update([1, 2, 3, 4], [], [])
assert (acc.events.index.levels[0][(- 1)] == 1)
acc.update([1, 2, 3, 4], [], [])
assert (acc.events.index.levels[0][(- 1)] == 2)
with pytest.raises(AssertionError):
acc.update([1, 2, 3, 4], [], [], frameid=5)
acc = mm.MOTAccumulator(auto_id=False)
with pytest.raises(AssertionError):
acc.update([1, 2, 3, 4], [], []) | -1,848,275,232,027,954,400 | Tests auto_id option. | motmetrics/tests/test_mot.py | test_auto_id | Borda/py-motmetrics | python | def test_auto_id():
acc = mm.MOTAccumulator(auto_id=True)
acc.update([1, 2, 3, 4], [], [])
acc.update([1, 2, 3, 4], [], [])
assert (acc.events.index.levels[0][(- 1)] == 1)
acc.update([1, 2, 3, 4], [], [])
assert (acc.events.index.levels[0][(- 1)] == 2)
with pytest.raises(AssertionError):
acc.update([1, 2, 3, 4], [], [], frameid=5)
acc = mm.MOTAccumulator(auto_id=False)
with pytest.raises(AssertionError):
acc.update([1, 2, 3, 4], [], []) |
def test_merge_dataframes():
'Tests merge_event_dataframes().'
acc = mm.MOTAccumulator()
acc.update([], [1, 2], [], frameid=0)
acc.update([1, 2], [], [], frameid=1)
acc.update([1, 2], [1, 2], [[1, 0.5], [0.3, 1]], frameid=2)
acc.update([1, 2], [1, 2], [[0.2, np.nan], [np.nan, 0.1]], frameid=3)
(r, mappings) = mm.MOTAccumulator.merge_event_dataframes([acc.events, acc.events], return_mappings=True)
expect = mm.MOTAccumulator.new_event_dataframe()
expect.loc[(0, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(0, 1), :] = ['RAW', np.nan, mappings[0]['hid_map'][1], np.nan]
expect.loc[(0, 2), :] = ['RAW', np.nan, mappings[0]['hid_map'][2], np.nan]
expect.loc[(0, 3), :] = ['FP', np.nan, mappings[0]['hid_map'][1], np.nan]
expect.loc[(0, 4), :] = ['FP', np.nan, mappings[0]['hid_map'][2], np.nan]
expect.loc[(1, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(1, 1), :] = ['RAW', mappings[0]['oid_map'][1], np.nan, np.nan]
expect.loc[(1, 2), :] = ['RAW', mappings[0]['oid_map'][2], np.nan, np.nan]
expect.loc[(1, 3), :] = ['MISS', mappings[0]['oid_map'][1], np.nan, np.nan]
expect.loc[(1, 4), :] = ['MISS', mappings[0]['oid_map'][2], np.nan, np.nan]
expect.loc[(2, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(2, 1), :] = ['RAW', mappings[0]['oid_map'][1], mappings[0]['hid_map'][1], 1]
expect.loc[(2, 2), :] = ['RAW', mappings[0]['oid_map'][1], mappings[0]['hid_map'][2], 0.5]
expect.loc[(2, 3), :] = ['RAW', mappings[0]['oid_map'][2], mappings[0]['hid_map'][1], 0.3]
expect.loc[(2, 4), :] = ['RAW', mappings[0]['oid_map'][2], mappings[0]['hid_map'][2], 1.0]
expect.loc[(2, 5), :] = ['MATCH', mappings[0]['oid_map'][1], mappings[0]['hid_map'][2], 0.5]
expect.loc[(2, 6), :] = ['MATCH', mappings[0]['oid_map'][2], mappings[0]['hid_map'][1], 0.3]
expect.loc[(3, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(3, 1), :] = ['RAW', mappings[0]['oid_map'][1], mappings[0]['hid_map'][1], 0.2]
expect.loc[(3, 2), :] = ['RAW', mappings[0]['oid_map'][2], mappings[0]['hid_map'][2], 0.1]
expect.loc[(3, 3), :] = ['TRANSFER', mappings[0]['oid_map'][1], mappings[0]['hid_map'][1], 0.2]
expect.loc[(3, 4), :] = ['SWITCH', mappings[0]['oid_map'][1], mappings[0]['hid_map'][1], 0.2]
expect.loc[(3, 5), :] = ['TRANSFER', mappings[0]['oid_map'][2], mappings[0]['hid_map'][2], 0.1]
expect.loc[(3, 6), :] = ['SWITCH', mappings[0]['oid_map'][2], mappings[0]['hid_map'][2], 0.1]
expect.loc[(4, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(4, 1), :] = ['RAW', np.nan, mappings[1]['hid_map'][1], np.nan]
expect.loc[(4, 2), :] = ['RAW', np.nan, mappings[1]['hid_map'][2], np.nan]
expect.loc[(4, 3), :] = ['FP', np.nan, mappings[1]['hid_map'][1], np.nan]
expect.loc[(4, 4), :] = ['FP', np.nan, mappings[1]['hid_map'][2], np.nan]
expect.loc[(5, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(5, 1), :] = ['RAW', mappings[1]['oid_map'][1], np.nan, np.nan]
expect.loc[(5, 2), :] = ['RAW', mappings[1]['oid_map'][2], np.nan, np.nan]
expect.loc[(5, 3), :] = ['MISS', mappings[1]['oid_map'][1], np.nan, np.nan]
expect.loc[(5, 4), :] = ['MISS', mappings[1]['oid_map'][2], np.nan, np.nan]
expect.loc[(6, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(6, 1), :] = ['RAW', mappings[1]['oid_map'][1], mappings[1]['hid_map'][1], 1]
expect.loc[(6, 2), :] = ['RAW', mappings[1]['oid_map'][1], mappings[1]['hid_map'][2], 0.5]
expect.loc[(6, 3), :] = ['RAW', mappings[1]['oid_map'][2], mappings[1]['hid_map'][1], 0.3]
expect.loc[(6, 4), :] = ['RAW', mappings[1]['oid_map'][2], mappings[1]['hid_map'][2], 1.0]
expect.loc[(6, 5), :] = ['MATCH', mappings[1]['oid_map'][1], mappings[1]['hid_map'][2], 0.5]
expect.loc[(6, 6), :] = ['MATCH', mappings[1]['oid_map'][2], mappings[1]['hid_map'][1], 0.3]
expect.loc[(7, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(7, 1), :] = ['RAW', mappings[1]['oid_map'][1], mappings[1]['hid_map'][1], 0.2]
expect.loc[(7, 2), :] = ['RAW', mappings[1]['oid_map'][2], mappings[1]['hid_map'][2], 0.1]
expect.loc[(7, 3), :] = ['TRANSFER', mappings[1]['oid_map'][1], mappings[1]['hid_map'][1], 0.2]
expect.loc[(7, 4), :] = ['SWITCH', mappings[1]['oid_map'][1], mappings[1]['hid_map'][1], 0.2]
expect.loc[(7, 5), :] = ['TRANSFER', mappings[1]['oid_map'][2], mappings[1]['hid_map'][2], 0.1]
expect.loc[(7, 6), :] = ['SWITCH', mappings[1]['oid_map'][2], mappings[1]['hid_map'][2], 0.1]
pd.util.testing.assert_frame_equal(r, expect) | 2,031,126,333,783,907,800 | Tests merge_event_dataframes(). | motmetrics/tests/test_mot.py | test_merge_dataframes | Borda/py-motmetrics | python | def test_merge_dataframes():
acc = mm.MOTAccumulator()
acc.update([], [1, 2], [], frameid=0)
acc.update([1, 2], [], [], frameid=1)
acc.update([1, 2], [1, 2], [[1, 0.5], [0.3, 1]], frameid=2)
acc.update([1, 2], [1, 2], [[0.2, np.nan], [np.nan, 0.1]], frameid=3)
(r, mappings) = mm.MOTAccumulator.merge_event_dataframes([acc.events, acc.events], return_mappings=True)
expect = mm.MOTAccumulator.new_event_dataframe()
expect.loc[(0, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(0, 1), :] = ['RAW', np.nan, mappings[0]['hid_map'][1], np.nan]
expect.loc[(0, 2), :] = ['RAW', np.nan, mappings[0]['hid_map'][2], np.nan]
expect.loc[(0, 3), :] = ['FP', np.nan, mappings[0]['hid_map'][1], np.nan]
expect.loc[(0, 4), :] = ['FP', np.nan, mappings[0]['hid_map'][2], np.nan]
expect.loc[(1, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(1, 1), :] = ['RAW', mappings[0]['oid_map'][1], np.nan, np.nan]
expect.loc[(1, 2), :] = ['RAW', mappings[0]['oid_map'][2], np.nan, np.nan]
expect.loc[(1, 3), :] = ['MISS', mappings[0]['oid_map'][1], np.nan, np.nan]
expect.loc[(1, 4), :] = ['MISS', mappings[0]['oid_map'][2], np.nan, np.nan]
expect.loc[(2, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(2, 1), :] = ['RAW', mappings[0]['oid_map'][1], mappings[0]['hid_map'][1], 1]
expect.loc[(2, 2), :] = ['RAW', mappings[0]['oid_map'][1], mappings[0]['hid_map'][2], 0.5]
expect.loc[(2, 3), :] = ['RAW', mappings[0]['oid_map'][2], mappings[0]['hid_map'][1], 0.3]
expect.loc[(2, 4), :] = ['RAW', mappings[0]['oid_map'][2], mappings[0]['hid_map'][2], 1.0]
expect.loc[(2, 5), :] = ['MATCH', mappings[0]['oid_map'][1], mappings[0]['hid_map'][2], 0.5]
expect.loc[(2, 6), :] = ['MATCH', mappings[0]['oid_map'][2], mappings[0]['hid_map'][1], 0.3]
expect.loc[(3, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(3, 1), :] = ['RAW', mappings[0]['oid_map'][1], mappings[0]['hid_map'][1], 0.2]
expect.loc[(3, 2), :] = ['RAW', mappings[0]['oid_map'][2], mappings[0]['hid_map'][2], 0.1]
expect.loc[(3, 3), :] = ['TRANSFER', mappings[0]['oid_map'][1], mappings[0]['hid_map'][1], 0.2]
expect.loc[(3, 4), :] = ['SWITCH', mappings[0]['oid_map'][1], mappings[0]['hid_map'][1], 0.2]
expect.loc[(3, 5), :] = ['TRANSFER', mappings[0]['oid_map'][2], mappings[0]['hid_map'][2], 0.1]
expect.loc[(3, 6), :] = ['SWITCH', mappings[0]['oid_map'][2], mappings[0]['hid_map'][2], 0.1]
expect.loc[(4, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(4, 1), :] = ['RAW', np.nan, mappings[1]['hid_map'][1], np.nan]
expect.loc[(4, 2), :] = ['RAW', np.nan, mappings[1]['hid_map'][2], np.nan]
expect.loc[(4, 3), :] = ['FP', np.nan, mappings[1]['hid_map'][1], np.nan]
expect.loc[(4, 4), :] = ['FP', np.nan, mappings[1]['hid_map'][2], np.nan]
expect.loc[(5, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(5, 1), :] = ['RAW', mappings[1]['oid_map'][1], np.nan, np.nan]
expect.loc[(5, 2), :] = ['RAW', mappings[1]['oid_map'][2], np.nan, np.nan]
expect.loc[(5, 3), :] = ['MISS', mappings[1]['oid_map'][1], np.nan, np.nan]
expect.loc[(5, 4), :] = ['MISS', mappings[1]['oid_map'][2], np.nan, np.nan]
expect.loc[(6, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(6, 1), :] = ['RAW', mappings[1]['oid_map'][1], mappings[1]['hid_map'][1], 1]
expect.loc[(6, 2), :] = ['RAW', mappings[1]['oid_map'][1], mappings[1]['hid_map'][2], 0.5]
expect.loc[(6, 3), :] = ['RAW', mappings[1]['oid_map'][2], mappings[1]['hid_map'][1], 0.3]
expect.loc[(6, 4), :] = ['RAW', mappings[1]['oid_map'][2], mappings[1]['hid_map'][2], 1.0]
expect.loc[(6, 5), :] = ['MATCH', mappings[1]['oid_map'][1], mappings[1]['hid_map'][2], 0.5]
expect.loc[(6, 6), :] = ['MATCH', mappings[1]['oid_map'][2], mappings[1]['hid_map'][1], 0.3]
expect.loc[(7, 0), :] = ['RAW', np.nan, np.nan, np.nan]
expect.loc[(7, 1), :] = ['RAW', mappings[1]['oid_map'][1], mappings[1]['hid_map'][1], 0.2]
expect.loc[(7, 2), :] = ['RAW', mappings[1]['oid_map'][2], mappings[1]['hid_map'][2], 0.1]
expect.loc[(7, 3), :] = ['TRANSFER', mappings[1]['oid_map'][1], mappings[1]['hid_map'][1], 0.2]
expect.loc[(7, 4), :] = ['SWITCH', mappings[1]['oid_map'][1], mappings[1]['hid_map'][1], 0.2]
expect.loc[(7, 5), :] = ['TRANSFER', mappings[1]['oid_map'][2], mappings[1]['hid_map'][2], 0.1]
expect.loc[(7, 6), :] = ['SWITCH', mappings[1]['oid_map'][2], mappings[1]['hid_map'][2], 0.1]
pd.util.testing.assert_frame_equal(r, expect) |
def cutadapt_to_json(filepath, savetofile=None):
'Convert cutadapt/trim_galore output to json\n\n Parameters\n ----------\n filepath: string\n Path to trim_galore/cutadapt output.txt\n\n Returns\n -------\n json_data: dict\n '
fh = open(filepath, 'r')
trim_info = {}
length_counts = {}
length_exp = {}
length_obsexp = {}
adapters = {}
sample = None
for l in fh:
if ('cutadapt' in l):
sample = None
if l.startswith('Used user'):
adapters = 'User provided'
break
if l.startswith('No adapter'):
adapters = 'None found (second pass)'
break
if l.startswith('Command line parameters'):
sample = l.split()[(- 1)]
sample = path_leaf(sample).replace('.fq.gz', '').replace('.fastq.gz', '')
if (sample in trim_info):
log.debug('Duplicate sample name found! Overwriting: {}'.format(sample))
trim_info[sample] = dict()
if (sample is not None):
for (k, r) in list(regexes.items()):
match = re.search(r, l)
if match:
trim_info[sample][k] = int(match.group(1).replace(',', ''))
if ('===' in l):
log_section = l.strip().strip('=').strip()
if l.startswith('Sequence:'):
plot_sname = '{} - {}'.format(sample, log_section)
adapters[plot_sname] = l.split(';')[0].strip('Sequence: ')
if (('length' in l) and ('count' in l) and ('expect' in l)):
plot_sname = sample
if (log_section is not None):
plot_sname = '{} - {}'.format(sample, log_section)
length_counts[plot_sname] = dict()
length_exp[plot_sname] = dict()
length_obsexp[plot_sname] = dict()
for l in fh:
r_seqs = re.search('^(\\d+)\\s+(\\d+)\\s+([\\d\\.]+)', l)
if r_seqs:
a_len = int(r_seqs.group(1))
length_counts[plot_sname][a_len] = int(r_seqs.group(2))
length_exp[plot_sname][a_len] = float(r_seqs.group(3))
if (float(r_seqs.group(3)) > 0):
length_obsexp[plot_sname][a_len] = (float(r_seqs.group(2)) / float(r_seqs.group(3)))
else:
length_obsexp[plot_sname][a_len] = float(r_seqs.group(2))
else:
break
fh.close()
json_data = {'adapters': adapters, 'trim_info': trim_info, 'length_exp': length_exp, 'length_obsexp': length_obsexp, 'length_counts': length_counts}
if savetofile:
json.dump(json_data, savetofile)
return json_data | 6,655,342,304,274,616,000 | Convert cutadapt/trim_galore output to json
Parameters
----------
filepath: string
Path to trim_galore/cutadapt output.txt
Returns
-------
json_data: dict | riboraptor/cutadapt_to_json.py | cutadapt_to_json | saketkc/riboraptor | python | def cutadapt_to_json(filepath, savetofile=None):
'Convert cutadapt/trim_galore output to json\n\n Parameters\n ----------\n filepath: string\n Path to trim_galore/cutadapt output.txt\n\n Returns\n -------\n json_data: dict\n '
fh = open(filepath, 'r')
trim_info = {}
length_counts = {}
length_exp = {}
length_obsexp = {}
adapters = {}
sample = None
for l in fh:
if ('cutadapt' in l):
sample = None
if l.startswith('Used user'):
adapters = 'User provided'
break
if l.startswith('No adapter'):
adapters = 'None found (second pass)'
break
if l.startswith('Command line parameters'):
sample = l.split()[(- 1)]
sample = path_leaf(sample).replace('.fq.gz', '').replace('.fastq.gz', '')
if (sample in trim_info):
log.debug('Duplicate sample name found! Overwriting: {}'.format(sample))
trim_info[sample] = dict()
if (sample is not None):
for (k, r) in list(regexes.items()):
match = re.search(r, l)
if match:
trim_info[sample][k] = int(match.group(1).replace(',', ''))
if ('===' in l):
log_section = l.strip().strip('=').strip()
if l.startswith('Sequence:'):
plot_sname = '{} - {}'.format(sample, log_section)
adapters[plot_sname] = l.split(';')[0].strip('Sequence: ')
if (('length' in l) and ('count' in l) and ('expect' in l)):
plot_sname = sample
if (log_section is not None):
plot_sname = '{} - {}'.format(sample, log_section)
length_counts[plot_sname] = dict()
length_exp[plot_sname] = dict()
length_obsexp[plot_sname] = dict()
for l in fh:
r_seqs = re.search('^(\\d+)\\s+(\\d+)\\s+([\\d\\.]+)', l)
if r_seqs:
a_len = int(r_seqs.group(1))
length_counts[plot_sname][a_len] = int(r_seqs.group(2))
length_exp[plot_sname][a_len] = float(r_seqs.group(3))
if (float(r_seqs.group(3)) > 0):
length_obsexp[plot_sname][a_len] = (float(r_seqs.group(2)) / float(r_seqs.group(3)))
else:
length_obsexp[plot_sname][a_len] = float(r_seqs.group(2))
else:
break
fh.close()
json_data = {'adapters': adapters, 'trim_info': trim_info, 'length_exp': length_exp, 'length_obsexp': length_obsexp, 'length_counts': length_counts}
if savetofile:
json.dump(json_data, savetofile)
return json_data |
def get_default_conda_env(include_cloudpickle=False, keras_module=None):
'\n :return: The default Conda environment for MLflow Models produced by calls to\n :func:`save_model()` and :func:`log_model()`.\n '
import tensorflow as tf
conda_deps = []
pip_deps = []
if (keras_module is None):
import keras
keras_module = keras
if (keras_module.__name__ == 'keras'):
if (LooseVersion(keras_module.__version__) < LooseVersion('2.3.1')):
conda_deps.append('keras=={}'.format(keras_module.__version__))
else:
pip_deps.append('keras=={}'.format(keras_module.__version__))
if include_cloudpickle:
import cloudpickle
pip_deps.append('cloudpickle=={}'.format(cloudpickle.__version__))
if (LooseVersion(tf.__version__) <= LooseVersion('1.13.2')):
conda_deps.append('tensorflow=={}'.format(tf.__version__))
else:
pip_deps.append('tensorflow=={}'.format(tf.__version__))
return _mlflow_conda_env(additional_conda_deps=conda_deps, additional_pip_deps=pip_deps, additional_conda_channels=None) | -5,623,015,681,642,164,000 | :return: The default Conda environment for MLflow Models produced by calls to
:func:`save_model()` and :func:`log_model()`. | mlflow/keras.py | get_default_conda_env | AnesBenmerzoug/mlflow | python | def get_default_conda_env(include_cloudpickle=False, keras_module=None):
'\n :return: The default Conda environment for MLflow Models produced by calls to\n :func:`save_model()` and :func:`log_model()`.\n '
import tensorflow as tf
conda_deps = []
pip_deps = []
if (keras_module is None):
import keras
keras_module = keras
if (keras_module.__name__ == 'keras'):
if (LooseVersion(keras_module.__version__) < LooseVersion('2.3.1')):
conda_deps.append('keras=={}'.format(keras_module.__version__))
else:
pip_deps.append('keras=={}'.format(keras_module.__version__))
if include_cloudpickle:
import cloudpickle
pip_deps.append('cloudpickle=={}'.format(cloudpickle.__version__))
if (LooseVersion(tf.__version__) <= LooseVersion('1.13.2')):
conda_deps.append('tensorflow=={}'.format(tf.__version__))
else:
pip_deps.append('tensorflow=={}'.format(tf.__version__))
return _mlflow_conda_env(additional_conda_deps=conda_deps, additional_pip_deps=pip_deps, additional_conda_channels=None) |
def save_model(keras_model, path, conda_env=None, mlflow_model=None, custom_objects=None, keras_module=None, signature: ModelSignature=None, input_example: ModelInputExample=None, **kwargs):
'\n Save a Keras model to a path on the local file system.\n\n :param keras_model: Keras model to be saved.\n :param path: Local path where the model is to be saved.\n :param conda_env: Either a dictionary representation of a Conda environment or the path to a\n Conda environment yaml file. If provided, this decsribes the environment\n this model should be run in. At minimum, it should specify the\n dependencies contained in :func:`get_default_conda_env()`. If\n ``None``, the default :func:`get_default_conda_env()` environment is\n added to the model. The following is an *example* dictionary\n representation of a Conda environment::\n\n {\n \'name\': \'mlflow-env\',\n \'channels\': [\'defaults\'],\n \'dependencies\': [\n \'python=3.7.0\',\n \'keras=2.2.4\',\n \'tensorflow=1.8.0\'\n ]\n }\n :param mlflow_model: MLflow model config this flavor is being added to.\n :param custom_objects: A Keras ``custom_objects`` dictionary mapping names (strings) to\n custom classes or functions associated with the Keras model. MLflow saves\n these custom layers using CloudPickle and restores them automatically\n when the model is loaded with :py:func:`mlflow.keras.load_model` and\n :py:func:`mlflow.pyfunc.load_model`.\n :param keras_module: Keras module to be used to save / load the model\n (``keras`` or ``tf.keras``). If not provided, MLflow will\n attempt to infer the Keras module based on the given model.\n :param kwargs: kwargs to pass to ``keras_model.save`` method.\n\n :param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`\n describes model input and output :py:class:`Schema <mlflow.types.Schema>`.\n The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`\n from datasets with valid model input (e.g. the training dataset with target\n column omitted) and valid model output (e.g. model predictions generated on\n the training dataset), for example:\n\n .. code-block:: python\n\n from mlflow.models.signature import infer_signature\n train = df.drop_column("target_label")\n predictions = ... # compute model predictions\n signature = infer_signature(train, predictions)\n :param input_example: (Experimental) Input example provides one or several instances of valid\n model input. The example can be used as a hint of what data to feed the\n model. The given example will be converted to a Pandas DataFrame and then\n serialized to json using the Pandas split-oriented format. Bytes are\n base64-encoded.\n\n .. code-block:: python\n :caption: Example\n\n import mlflow\n # Build, compile, and train your model\n keras_model = ...\n keras_model_path = ...\n keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])\n results = keras_model.fit(\n x_train, y_train, epochs=20, batch_size = 128, validation_data=(x_val, y_val))\n # Save the model as an MLflow Model\n mlflow.keras.save_model(keras_model, keras_model_path)\n '
if (keras_module is None):
def _is_plain_keras(model):
try:
import keras.engine.network
return isinstance(model, keras.engine.network.Network)
except ImportError:
return False
def _is_tf_keras(model):
try:
import tensorflow.keras.models
return isinstance(model, tensorflow.keras.models.Model)
except ImportError:
return False
if _is_plain_keras(keras_model):
keras_module = importlib.import_module('keras')
elif _is_tf_keras(keras_model):
keras_module = importlib.import_module('tensorflow.keras')
else:
raise MlflowException("Unable to infer keras module from the model, please specify which keras module ('keras' or 'tensorflow.keras') is to be used to save and load the model.")
elif (type(keras_module) == str):
keras_module = importlib.import_module(keras_module)
path = os.path.abspath(path)
if os.path.exists(path):
raise MlflowException("Path '{}' already exists".format(path))
data_subpath = 'data'
data_path = os.path.join(path, data_subpath)
os.makedirs(data_path)
if (mlflow_model is None):
mlflow_model = Model()
if (signature is not None):
mlflow_model.signature = signature
if (input_example is not None):
_save_example(mlflow_model, input_example, path)
if (custom_objects is not None):
_save_custom_objects(data_path, custom_objects)
with open(os.path.join(data_path, _KERAS_MODULE_SPEC_PATH), 'w') as f:
f.write(keras_module.__name__)
model_subpath = os.path.join(data_subpath, _MODEL_SAVE_PATH)
model_path = os.path.join(path, model_subpath)
if path.startswith('/dbfs/'):
with tempfile.NamedTemporaryFile(suffix='.h5') as f:
keras_model.save(f.name, **kwargs)
f.flush()
shutil.copyfile(src=f.name, dst=model_path)
else:
keras_model.save(model_path, **kwargs)
mlflow_model.add_flavor(FLAVOR_NAME, keras_module=keras_module.__name__, keras_version=keras_module.__version__, data=data_subpath)
if (conda_env is None):
conda_env = get_default_conda_env(include_cloudpickle=(custom_objects is not None), keras_module=keras_module)
elif (not isinstance(conda_env, dict)):
with open(conda_env, 'r') as f:
conda_env = yaml.safe_load(f)
with open(os.path.join(path, _CONDA_ENV_SUBPATH), 'w') as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
pyfunc.add_to_model(mlflow_model, loader_module='mlflow.keras', data=data_subpath, env=_CONDA_ENV_SUBPATH)
mlflow_model.save(os.path.join(path, 'MLmodel')) | 1,466,731,621,052,774,000 | Save a Keras model to a path on the local file system.
:param keras_model: Keras model to be saved.
:param path: Local path where the model is to be saved.
:param conda_env: Either a dictionary representation of a Conda environment or the path to a
Conda environment yaml file. If provided, this describes the environment
this model should be run in. At minimum, it should specify the
dependencies contained in :func:`get_default_conda_env()`. If
``None``, the default :func:`get_default_conda_env()` environment is
added to the model. The following is an *example* dictionary
representation of a Conda environment::
{
'name': 'mlflow-env',
'channels': ['defaults'],
'dependencies': [
'python=3.7.0',
'keras=2.2.4',
'tensorflow=1.8.0'
]
}
:param mlflow_model: MLflow model config this flavor is being added to.
:param custom_objects: A Keras ``custom_objects`` dictionary mapping names (strings) to
custom classes or functions associated with the Keras model. MLflow saves
these custom layers using CloudPickle and restores them automatically
when the model is loaded with :py:func:`mlflow.keras.load_model` and
:py:func:`mlflow.pyfunc.load_model`.
:param keras_module: Keras module to be used to save / load the model
(``keras`` or ``tf.keras``). If not provided, MLflow will
attempt to infer the Keras module based on the given model.
:param kwargs: kwargs to pass to ``keras_model.save`` method.
:param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: (Experimental) Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example will be converted to a Pandas DataFrame and then
serialized to json using the Pandas split-oriented format. Bytes are
base64-encoded.
.. code-block:: python
:caption: Example
import mlflow
# Build, compile, and train your model
keras_model = ...
keras_model_path = ...
keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])
results = keras_model.fit(
x_train, y_train, epochs=20, batch_size = 128, validation_data=(x_val, y_val))
# Save the model as an MLflow Model
mlflow.keras.save_model(keras_model, keras_model_path) | mlflow/keras.py | save_model | AnesBenmerzoug/mlflow | python | def save_model(keras_model, path, conda_env=None, mlflow_model=None, custom_objects=None, keras_module=None, signature: ModelSignature=None, input_example: ModelInputExample=None, **kwargs):
'\n Save a Keras model to a path on the local file system.\n\n :param keras_model: Keras model to be saved.\n :param path: Local path where the model is to be saved.\n :param conda_env: Either a dictionary representation of a Conda environment or the path to a\n Conda environment yaml file. If provided, this decsribes the environment\n this model should be run in. At minimum, it should specify the\n dependencies contained in :func:`get_default_conda_env()`. If\n ``None``, the default :func:`get_default_conda_env()` environment is\n added to the model. The following is an *example* dictionary\n representation of a Conda environment::\n\n {\n \'name\': \'mlflow-env\',\n \'channels\': [\'defaults\'],\n \'dependencies\': [\n \'python=3.7.0\',\n \'keras=2.2.4\',\n \'tensorflow=1.8.0\'\n ]\n }\n :param mlflow_model: MLflow model config this flavor is being added to.\n :param custom_objects: A Keras ``custom_objects`` dictionary mapping names (strings) to\n custom classes or functions associated with the Keras model. MLflow saves\n these custom layers using CloudPickle and restores them automatically\n when the model is loaded with :py:func:`mlflow.keras.load_model` and\n :py:func:`mlflow.pyfunc.load_model`.\n :param keras_module: Keras module to be used to save / load the model\n (``keras`` or ``tf.keras``). If not provided, MLflow will\n attempt to infer the Keras module based on the given model.\n :param kwargs: kwargs to pass to ``keras_model.save`` method.\n\n :param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`\n describes model input and output :py:class:`Schema <mlflow.types.Schema>`.\n The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`\n from datasets with valid model input (e.g. the training dataset with target\n column omitted) and valid model output (e.g. model predictions generated on\n the training dataset), for example:\n\n .. code-block:: python\n\n from mlflow.models.signature import infer_signature\n train = df.drop_column("target_label")\n predictions = ... # compute model predictions\n signature = infer_signature(train, predictions)\n :param input_example: (Experimental) Input example provides one or several instances of valid\n model input. The example can be used as a hint of what data to feed the\n model. The given example will be converted to a Pandas DataFrame and then\n serialized to json using the Pandas split-oriented format. Bytes are\n base64-encoded.\n\n .. code-block:: python\n :caption: Example\n\n import mlflow\n # Build, compile, and train your model\n keras_model = ...\n keras_model_path = ...\n keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])\n results = keras_model.fit(\n x_train, y_train, epochs=20, batch_size = 128, validation_data=(x_val, y_val))\n # Save the model as an MLflow Model\n mlflow.keras.save_model(keras_model, keras_model_path)\n '
if (keras_module is None):
def _is_plain_keras(model):
try:
import keras.engine.network
return isinstance(model, keras.engine.network.Network)
except ImportError:
return False
def _is_tf_keras(model):
try:
import tensorflow.keras.models
return isinstance(model, tensorflow.keras.models.Model)
except ImportError:
return False
if _is_plain_keras(keras_model):
keras_module = importlib.import_module('keras')
elif _is_tf_keras(keras_model):
keras_module = importlib.import_module('tensorflow.keras')
else:
raise MlflowException("Unable to infer keras module from the model, please specify which keras module ('keras' or 'tensorflow.keras') is to be used to save and load the model.")
elif (type(keras_module) == str):
keras_module = importlib.import_module(keras_module)
path = os.path.abspath(path)
if os.path.exists(path):
raise MlflowException("Path '{}' already exists".format(path))
data_subpath = 'data'
data_path = os.path.join(path, data_subpath)
os.makedirs(data_path)
if (mlflow_model is None):
mlflow_model = Model()
if (signature is not None):
mlflow_model.signature = signature
if (input_example is not None):
_save_example(mlflow_model, input_example, path)
if (custom_objects is not None):
_save_custom_objects(data_path, custom_objects)
with open(os.path.join(data_path, _KERAS_MODULE_SPEC_PATH), 'w') as f:
f.write(keras_module.__name__)
model_subpath = os.path.join(data_subpath, _MODEL_SAVE_PATH)
model_path = os.path.join(path, model_subpath)
if path.startswith('/dbfs/'):
with tempfile.NamedTemporaryFile(suffix='.h5') as f:
keras_model.save(f.name, **kwargs)
f.flush()
shutil.copyfile(src=f.name, dst=model_path)
else:
keras_model.save(model_path, **kwargs)
mlflow_model.add_flavor(FLAVOR_NAME, keras_module=keras_module.__name__, keras_version=keras_module.__version__, data=data_subpath)
if (conda_env is None):
conda_env = get_default_conda_env(include_cloudpickle=(custom_objects is not None), keras_module=keras_module)
elif (not isinstance(conda_env, dict)):
with open(conda_env, 'r') as f:
conda_env = yaml.safe_load(f)
with open(os.path.join(path, _CONDA_ENV_SUBPATH), 'w') as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
pyfunc.add_to_model(mlflow_model, loader_module='mlflow.keras', data=data_subpath, env=_CONDA_ENV_SUBPATH)
mlflow_model.save(os.path.join(path, 'MLmodel')) |
def log_model(keras_model, artifact_path, conda_env=None, custom_objects=None, keras_module=None, registered_model_name=None, signature: ModelSignature=None, input_example: ModelInputExample=None, **kwargs):
'\n Log a Keras model as an MLflow artifact for the current run.\n\n :param keras_model: Keras model to be saved.\n :param artifact_path: Run-relative artifact path.\n :param conda_env: Either a dictionary representation of a Conda environment or\n the path to a Conda environment yaml file.\n If provided, this describes the environment this model should be\n run in. At minimum, it should specify the dependencies\n contained in :func:`get_default_conda_env()`. If ``None``, the default\n :func:`mlflow.keras.get_default_conda_env()` environment is added to\n the model. The following is an *example* dictionary representation of a\n Conda environment::\n\n {\n \'name\': \'mlflow-env\',\n \'channels\': [\'defaults\'],\n \'dependencies\': [\n \'python=3.7.0\',\n \'keras=2.2.4\',\n \'tensorflow=1.8.0\'\n ]\n }\n\n :param custom_objects: A Keras ``custom_objects`` dictionary mapping names (strings) to\n custom classes or functions associated with the Keras model. MLflow saves\n these custom layers using CloudPickle and restores them automatically\n when the model is loaded with :py:func:`mlflow.keras.load_model` and\n :py:func:`mlflow.pyfunc.load_model`.\n :param keras_module: Keras module to be used to save / load the model\n (``keras`` or ``tf.keras``). If not provided, MLflow will\n attempt to infer the Keras module based on the given model.\n :param registered_model_name: (Experimental) If given, create a model version under\n ``registered_model_name``, also creating a registered model if one\n with the given name does not exist.\n\n :param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`\n describes model input and output :py:class:`Schema <mlflow.types.Schema>`.\n The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`\n from datasets with valid model input (e.g. the training dataset with target\n column omitted) and valid model output (e.g. model predictions generated on\n the training dataset), for example:\n\n .. code-block:: python\n\n from mlflow.models.signature import infer_signature\n train = df.drop_column("target_label")\n predictions = ... # compute model predictions\n signature = infer_signature(train, predictions)\n :param input_example: (Experimental) Input example provides one or several instances of valid\n model input. The example can be used as a hint of what data to feed the\n model. The given example will be converted to a Pandas DataFrame and then\n serialized to json using the Pandas split-oriented format. Bytes are\n base64-encoded.\n\n :param kwargs: kwargs to pass to ``keras_model.save`` method.\n\n .. code-block:: python\n :caption: Example\n\n from keras import Dense, layers\n import mlflow\n # Build, compile, and train your model\n keras_model = ...\n keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])\n results = keras_model.fit(\n x_train, y_train, epochs=20, batch_size = 128, validation_data=(x_val, y_val))\n # Log metrics and log the model\n with mlflow.start_run() as run:\n mlflow.keras.log_model(keras_model, "models")\n '
Model.log(artifact_path=artifact_path, flavor=mlflow.keras, keras_model=keras_model, conda_env=conda_env, custom_objects=custom_objects, keras_module=keras_module, registered_model_name=registered_model_name, signature=signature, input_example=input_example, **kwargs) | -6,210,177,250,117,643,000 | Log a Keras model as an MLflow artifact for the current run.
:param keras_model: Keras model to be saved.
:param artifact_path: Run-relative artifact path.
:param conda_env: Either a dictionary representation of a Conda environment or
the path to a Conda environment yaml file.
If provided, this describes the environment this model should be
run in. At minimum, it should specify the dependencies
contained in :func:`get_default_conda_env()`. If ``None``, the default
:func:`mlflow.keras.get_default_conda_env()` environment is added to
the model. The following is an *example* dictionary representation of a
Conda environment::
{
'name': 'mlflow-env',
'channels': ['defaults'],
'dependencies': [
'python=3.7.0',
'keras=2.2.4',
'tensorflow=1.8.0'
]
}
:param custom_objects: A Keras ``custom_objects`` dictionary mapping names (strings) to
custom classes or functions associated with the Keras model. MLflow saves
these custom layers using CloudPickle and restores them automatically
when the model is loaded with :py:func:`mlflow.keras.load_model` and
:py:func:`mlflow.pyfunc.load_model`.
:param keras_module: Keras module to be used to save / load the model
(``keras`` or ``tf.keras``). If not provided, MLflow will
attempt to infer the Keras module based on the given model.
:param registered_model_name: (Experimental) If given, create a model version under
``registered_model_name``, also creating a registered model if one
with the given name does not exist.
:param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: (Experimental) Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example will be converted to a Pandas DataFrame and then
serialized to json using the Pandas split-oriented format. Bytes are
base64-encoded.
:param kwargs: kwargs to pass to ``keras_model.save`` method.
.. code-block:: python
:caption: Example
from keras import Dense, layers
import mlflow
# Build, compile, and train your model
keras_model = ...
keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])
results = keras_model.fit(
x_train, y_train, epochs=20, batch_size = 128, validation_data=(x_val, y_val))
# Log metrics and log the model
with mlflow.start_run() as run:
mlflow.keras.log_model(keras_model, "models") | mlflow/keras.py | log_model | AnesBenmerzoug/mlflow | python | def log_model(keras_model, artifact_path, conda_env=None, custom_objects=None, keras_module=None, registered_model_name=None, signature: ModelSignature=None, input_example: ModelInputExample=None, **kwargs):
'\n Log a Keras model as an MLflow artifact for the current run.\n\n :param keras_model: Keras model to be saved.\n :param artifact_path: Run-relative artifact path.\n :param conda_env: Either a dictionary representation of a Conda environment or\n the path to a Conda environment yaml file.\n If provided, this describes the environment this model should be\n run in. At minimum, it should specify the dependencies\n contained in :func:`get_default_conda_env()`. If ``None``, the default\n :func:`mlflow.keras.get_default_conda_env()` environment is added to\n the model. The following is an *example* dictionary representation of a\n Conda environment::\n\n {\n \'name\': \'mlflow-env\',\n \'channels\': [\'defaults\'],\n \'dependencies\': [\n \'python=3.7.0\',\n \'keras=2.2.4\',\n \'tensorflow=1.8.0\'\n ]\n }\n\n :param custom_objects: A Keras ``custom_objects`` dictionary mapping names (strings) to\n custom classes or functions associated with the Keras model. MLflow saves\n these custom layers using CloudPickle and restores them automatically\n when the model is loaded with :py:func:`mlflow.keras.load_model` and\n :py:func:`mlflow.pyfunc.load_model`.\n :param keras_module: Keras module to be used to save / load the model\n (``keras`` or ``tf.keras``). If not provided, MLflow will\n attempt to infer the Keras module based on the given model.\n :param registered_model_name: (Experimental) If given, create a model version under\n ``registered_model_name``, also creating a registered model if one\n with the given name does not exist.\n\n :param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`\n describes model input and output :py:class:`Schema <mlflow.types.Schema>`.\n The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`\n from datasets with valid model input (e.g. the training dataset with target\n column omitted) and valid model output (e.g. model predictions generated on\n the training dataset), for example:\n\n .. code-block:: python\n\n from mlflow.models.signature import infer_signature\n train = df.drop_column("target_label")\n predictions = ... # compute model predictions\n signature = infer_signature(train, predictions)\n :param input_example: (Experimental) Input example provides one or several instances of valid\n model input. The example can be used as a hint of what data to feed the\n model. The given example will be converted to a Pandas DataFrame and then\n serialized to json using the Pandas split-oriented format. Bytes are\n base64-encoded.\n\n :param kwargs: kwargs to pass to ``keras_model.save`` method.\n\n .. code-block:: python\n :caption: Example\n\n from keras import Dense, layers\n import mlflow\n # Build, compile, and train your model\n keras_model = ...\n keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])\n results = keras_model.fit(\n x_train, y_train, epochs=20, batch_size = 128, validation_data=(x_val, y_val))\n # Log metrics and log the model\n with mlflow.start_run() as run:\n mlflow.keras.log_model(keras_model, "models")\n '
Model.log(artifact_path=artifact_path, flavor=mlflow.keras, keras_model=keras_model, conda_env=conda_env, custom_objects=custom_objects, keras_module=keras_module, registered_model_name=registered_model_name, signature=signature, input_example=input_example, **kwargs) |
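A minimal usage sketch for the log_model row above; keras_model and x_train are illustrative assumptions, not taken from the dataset row.

# Illustrative sketch only: assumes `keras_model` and `x_train` already exist.
import mlflow
import mlflow.keras
from mlflow.models.signature import infer_signature

with mlflow.start_run():
    predictions = keras_model.predict(x_train)
    signature = infer_signature(x_train, predictions)
    mlflow.keras.log_model(
        keras_model,
        artifact_path="models",
        signature=signature,        # optional, experimental per the docstring above
        input_example=x_train[:5],  # optional input example
    )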
def _save_custom_objects(path, custom_objects):
'\n Save custom objects dictionary to a cloudpickle file so a model can be easily loaded later.\n\n :param path: An absolute path that points to the data directory within /path/to/model.\n :param custom_objects: Keras ``custom_objects`` is a dictionary mapping\n names (strings) to custom classes or functions to be considered\n during deserialization. MLflow saves these custom layers using\n CloudPickle and restores them automatically when the model is\n loaded with :py:func:`mlflow.keras.load_model` and\n :py:func:`mlflow.pyfunc.load_model`.\n '
import cloudpickle
custom_objects_path = os.path.join(path, _CUSTOM_OBJECTS_SAVE_PATH)
with open(custom_objects_path, 'wb') as out_f:
cloudpickle.dump(custom_objects, out_f) | -3,764,244,966,678,153,700 | Save custom objects dictionary to a cloudpickle file so a model can be easily loaded later.
:param path: An absolute path that points to the data directory within /path/to/model.
:param custom_objects: Keras ``custom_objects`` is a dictionary mapping
names (strings) to custom classes or functions to be considered
during deserialization. MLflow saves these custom layers using
CloudPickle and restores them automatically when the model is
loaded with :py:func:`mlflow.keras.load_model` and
:py:func:`mlflow.pyfunc.load_model`. | mlflow/keras.py | _save_custom_objects | AnesBenmerzoug/mlflow | python | def _save_custom_objects(path, custom_objects):
'\n Save custom objects dictionary to a cloudpickle file so a model can be easily loaded later.\n\n :param path: An absolute path that points to the data directory within /path/to/model.\n :param custom_objects: Keras ``custom_objects`` is a dictionary mapping\n names (strings) to custom classes or functions to be considered\n during deserialization. MLflow saves these custom layers using\n CloudPickle and restores them automatically when the model is\n loaded with :py:func:`mlflow.keras.load_model` and\n :py:func:`mlflow.pyfunc.load_model`.\n '
import cloudpickle
custom_objects_path = os.path.join(path, _CUSTOM_OBJECTS_SAVE_PATH)
with open(custom_objects_path, 'wb') as out_f:
cloudpickle.dump(custom_objects, out_f) |
def _load_pyfunc(path):
'\n Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``.\n\n :param path: Local filesystem path to the MLflow Model with the ``keras`` flavor.\n '
import tensorflow as tf
if os.path.isfile(os.path.join(path, _KERAS_MODULE_SPEC_PATH)):
with open(os.path.join(path, _KERAS_MODULE_SPEC_PATH), 'r') as f:
keras_module = importlib.import_module(f.read())
else:
import keras
keras_module = keras
K = importlib.import_module((keras_module.__name__ + '.backend'))
if ((keras_module.__name__ == 'tensorflow.keras') or (K.backend() == 'tensorflow')):
if (LooseVersion(tf.__version__) < LooseVersion('2.0.0')):
graph = tf.Graph()
sess = tf.Session(graph=graph)
with graph.as_default():
with sess.as_default():
K.set_learning_phase(0)
m = _load_model(path, keras_module=keras_module, compile=False)
return _KerasModelWrapper(m, graph, sess)
else:
K.set_learning_phase(0)
m = _load_model(path, keras_module=keras_module, compile=False)
return _KerasModelWrapper(m, None, None)
else:
raise MlflowException(("Unsupported backend '%s'" % K._BACKEND)) | -1,494,293,613,030,853,000 | Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``.
:param path: Local filesystem path to the MLflow Model with the ``keras`` flavor. | mlflow/keras.py | _load_pyfunc | AnesBenmerzoug/mlflow | python | def _load_pyfunc(path):
'\n Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``.\n\n :param path: Local filesystem path to the MLflow Model with the ``keras`` flavor.\n '
import tensorflow as tf
if os.path.isfile(os.path.join(path, _KERAS_MODULE_SPEC_PATH)):
with open(os.path.join(path, _KERAS_MODULE_SPEC_PATH), 'r') as f:
keras_module = importlib.import_module(f.read())
else:
import keras
keras_module = keras
K = importlib.import_module((keras_module.__name__ + '.backend'))
if ((keras_module.__name__ == 'tensorflow.keras') or (K.backend() == 'tensorflow')):
if (LooseVersion(tf.__version__) < LooseVersion('2.0.0')):
graph = tf.Graph()
sess = tf.Session(graph=graph)
with graph.as_default():
with sess.as_default():
K.set_learning_phase(0)
m = _load_model(path, keras_module=keras_module, compile=False)
return _KerasModelWrapper(m, graph, sess)
else:
K.set_learning_phase(0)
m = _load_model(path, keras_module=keras_module, compile=False)
return _KerasModelWrapper(m, None, None)
else:
raise MlflowException(("Unsupported backend '%s'" % K._BACKEND)) |
def load_model(model_uri, **kwargs):
'\n Load a Keras model from a local file or a run.\n\n Extra arguments are passed through to keras.load_model.\n\n :param model_uri: The location, in URI format, of the MLflow model. For example:\n\n - ``/Users/me/path/to/local/model``\n - ``relative/path/to/local/model``\n - ``s3://my_bucket/path/to/model``\n - ``runs:/<mlflow_run_id>/run-relative/path/to/model``\n - ``models:/<model_name>/<model_version>``\n - ``models:/<model_name>/<stage>``\n\n For more information about supported URI schemes, see\n `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#\n artifact-locations>`_.\n\n :return: A Keras model instance.\n\n .. code-block:: python\n :caption: Example\n\n # Load persisted model as a Keras model or as a PyFunc, call predict() on a pandas DataFrame\n keras_model = mlflow.keras.load_model("runs:/96771d893a5e46159d9f3b49bf9013e2" + "/models")\n predictions = keras_model.predict(x_test)\n '
local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)
flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
keras_module = importlib.import_module(flavor_conf.get('keras_module', 'keras'))
keras_model_artifacts_path = os.path.join(local_model_path, flavor_conf.get('data', _MODEL_SAVE_PATH))
return _load_model(model_path=keras_model_artifacts_path, keras_module=keras_module, **kwargs) | 2,781,844,495,470,014,500 | Load a Keras model from a local file or a run.
Extra arguments are passed through to keras.load_model.
:param model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
:return: A Keras model instance.
.. code-block:: python
:caption: Example
# Load persisted model as a Keras model or as a PyFunc, call predict() on a pandas DataFrame
keras_model = mlflow.keras.load_model("runs:/96771d893a5e46159d9f3b49bf9013e2" + "/models")
predictions = keras_model.predict(x_test) | mlflow/keras.py | load_model | AnesBenmerzoug/mlflow | python | def load_model(model_uri, **kwargs):
'\n Load a Keras model from a local file or a run.\n\n Extra arguments are passed through to keras.load_model.\n\n :param model_uri: The location, in URI format, of the MLflow model. For example:\n\n - ``/Users/me/path/to/local/model``\n - ``relative/path/to/local/model``\n - ``s3://my_bucket/path/to/model``\n - ``runs:/<mlflow_run_id>/run-relative/path/to/model``\n - ``models:/<model_name>/<model_version>``\n - ``models:/<model_name>/<stage>``\n\n For more information about supported URI schemes, see\n `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#\n artifact-locations>`_.\n\n :return: A Keras model instance.\n\n .. code-block:: python\n :caption: Example\n\n # Load persisted model as a Keras model or as a PyFunc, call predict() on a pandas DataFrame\n keras_model = mlflow.keras.load_model("runs:/96771d893a5e46159d9f3b49bf9013e2" + "/models")\n predictions = keras_model.predict(x_test)\n '
local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)
flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
keras_module = importlib.import_module(flavor_conf.get('keras_module', 'keras'))
keras_model_artifacts_path = os.path.join(local_model_path, flavor_conf.get('data', _MODEL_SAVE_PATH))
return _load_model(model_path=keras_model_artifacts_path, keras_module=keras_module, **kwargs) |
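A short, hedged sketch of loading a logged model back, either as a native Keras model or through the pyfunc flavor; the run URI and pandas_df are placeholders, not values from this record.

# Illustrative sketch only: model_uri and pandas_df are placeholders.
import mlflow.keras
import mlflow.pyfunc

model_uri = "runs:/<run_id>/models"
keras_model = mlflow.keras.load_model(model_uri)    # native Keras model
pyfunc_model = mlflow.pyfunc.load_model(model_uri)  # generic pyfunc wrapper
predictions = pyfunc_model.predict(pandas_df)       # pyfunc predict expects a pandas DataFrame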
@experimental
def autolog():
'\n Enables automatic logging from Keras to MLflow. Autologging captures the following information:\n\n **Metrics** and **Parameters**\n - Training loss; validation loss; user-specified metrics\n - Metrics associated with the ``EarlyStopping`` callbacks: ``stopped_epoch``,\n ``restored_epoch``, ``restore_best_weight``, ``last_epoch``, etc\n - ``fit()`` or ``fit_generator()`` parameters; optimizer name; learning rate; epsilon\n - ``fit()`` or ``fit_generator()`` parameters associated with ``EarlyStopping``: ``min_delta``,\n ``patience``, ``baseline``, ``restore_best_weights``, etc\n **Artifacts**\n - Model summary on training start\n - `MLflow Model <https://mlflow.org/docs/latest/models.html>`_ (Keras model) on training end\n\n .. code-block:: python\n :caption: Example\n\n import mlflow\n import mlflow.keras\n # Build, compile, enable autologging, and train your model\n keras_model = ...\n keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])\n # autolog your metrics, parameters, and model\n mlflow.keras.autolog()\n results = keras_model.fit(\n x_train, y_train, epochs=20, batch_size=128, validation_data=(x_val, y_val))\n\n ``EarlyStopping Integration with Keras AutoLogging``\n\n MLflow will detect if an ``EarlyStopping`` callback is used in a ``fit()`` or\n ``fit_generator()`` call, and if the ``restore_best_weights`` parameter is set to be ``True``,\n then MLflow will log the metrics associated with the restored model as a final, extra step.\n The epoch of the restored model will also be logged as the metric ``restored_epoch``.\n This allows for easy comparison between the actual metrics of the restored model and\n the metrics of other models.\n\n If ``restore_best_weights`` is set to be ``False``, then MLflow will not log an additional step.\n\n Regardless of ``restore_best_weights``, MLflow will also log ``stopped_epoch``,\n which indicates the epoch at which training stopped due to early stopping.\n\n If training does not end due to early stopping, then ``stopped_epoch`` will be logged as ``0``.\n\n MLflow will also log the parameters of the ``EarlyStopping`` callback,\n excluding ``mode`` and ``verbose``.\n '
import keras
class __MLflowKerasCallback(keras.callbacks.Callback):
'\n Callback for auto-logging metrics and parameters.\n Records available logs after each epoch.\n Records model structural information as params when training begins\n '
def on_train_begin(self, logs=None):
try_mlflow_log(mlflow.log_param, 'num_layers', len(self.model.layers))
try_mlflow_log(mlflow.log_param, 'optimizer_name', type(self.model.optimizer).__name__)
if hasattr(self.model.optimizer, 'lr'):
lr = (self.model.optimizer.lr if (type(self.model.optimizer.lr) is float) else keras.backend.eval(self.model.optimizer.lr))
try_mlflow_log(mlflow.log_param, 'learning_rate', lr)
if hasattr(self.model.optimizer, 'epsilon'):
epsilon = (self.model.optimizer.epsilon if (type(self.model.optimizer.epsilon) is float) else keras.backend.eval(self.model.optimizer.epsilon))
try_mlflow_log(mlflow.log_param, 'epsilon', epsilon)
sum_list = []
self.model.summary(print_fn=sum_list.append)
summary = '\n'.join(sum_list)
tempdir = tempfile.mkdtemp()
try:
summary_file = os.path.join(tempdir, 'model_summary.txt')
with open(summary_file, 'w') as f:
f.write(summary)
try_mlflow_log(mlflow.log_artifact, local_path=summary_file)
finally:
shutil.rmtree(tempdir)
def on_epoch_end(self, epoch, logs=None):
if (not logs):
return
try_mlflow_log(mlflow.log_metrics, logs, step=epoch)
def on_train_end(self, logs=None):
try_mlflow_log(log_model, self.model, artifact_path='model')
def _implements_train_batch_hooks(self):
return False
def _implements_test_batch_hooks(self):
return False
def _implements_predict_batch_hooks(self):
return False
def _early_stop_check(callbacks):
if (LooseVersion(keras.__version__) < LooseVersion('2.3.0')):
es_callback = keras.callbacks.EarlyStopping
else:
es_callback = keras.callbacks.callbacks.EarlyStopping
for callback in callbacks:
if isinstance(callback, es_callback):
return callback
return None
def _log_early_stop_callback_params(callback):
if callback:
try:
earlystopping_params = {'monitor': callback.monitor, 'min_delta': callback.min_delta, 'patience': callback.patience, 'baseline': callback.baseline, 'restore_best_weights': callback.restore_best_weights}
try_mlflow_log(mlflow.log_params, earlystopping_params)
except Exception:
return
def _get_early_stop_callback_attrs(callback):
try:
return (callback.stopped_epoch, callback.restore_best_weights, callback.patience)
except Exception:
return None
def _log_early_stop_callback_metrics(callback, history):
if callback:
callback_attrs = _get_early_stop_callback_attrs(callback)
if (callback_attrs is None):
return
(stopped_epoch, restore_best_weights, patience) = callback_attrs
try_mlflow_log(mlflow.log_metric, 'stopped_epoch', stopped_epoch)
if ((stopped_epoch != 0) and restore_best_weights):
restored_epoch = (stopped_epoch - max(1, patience))
try_mlflow_log(mlflow.log_metric, 'restored_epoch', restored_epoch)
restored_metrics = {key: history.history[key][restored_epoch] for key in history.history.keys()}
metric_key = next(iter(history.history), None)
if (metric_key is not None):
last_epoch = len(history.history[metric_key])
try_mlflow_log(mlflow.log_metrics, restored_metrics, step=last_epoch)
def _run_and_log_function(self, original, args, kwargs, unlogged_params, callback_arg_index):
if (not mlflow.active_run()):
try_mlflow_log(mlflow.start_run)
auto_end_run = True
else:
auto_end_run = False
log_fn_args_as_params(original, args, kwargs, unlogged_params)
early_stop_callback = None
if (len(args) > callback_arg_index):
tmp_list = list(args)
early_stop_callback = _early_stop_check(tmp_list[callback_arg_index])
tmp_list[callback_arg_index] += [__MLflowKerasCallback()]
args = tuple(tmp_list)
elif ('callbacks' in kwargs):
early_stop_callback = _early_stop_check(kwargs['callbacks'])
kwargs['callbacks'] += [__MLflowKerasCallback()]
else:
kwargs['callbacks'] = [__MLflowKerasCallback()]
_log_early_stop_callback_params(early_stop_callback)
history = original(self, *args, **kwargs)
_log_early_stop_callback_metrics(early_stop_callback, history)
if auto_end_run:
try_mlflow_log(mlflow.end_run)
return history
@gorilla.patch(keras.Model)
def fit(self, *args, **kwargs):
original = gorilla.get_original_attribute(keras.Model, 'fit')
unlogged_params = ['self', 'x', 'y', 'callbacks', 'validation_data', 'verbose']
return _run_and_log_function(self, original, args, kwargs, unlogged_params, 5)
@gorilla.patch(keras.Model)
def fit_generator(self, *args, **kwargs):
original = gorilla.get_original_attribute(keras.Model, 'fit_generator')
unlogged_params = ['self', 'generator', 'callbacks', 'validation_data', 'verbose']
return _run_and_log_function(self, original, args, kwargs, unlogged_params, 4)
settings = gorilla.Settings(allow_hit=True, store_hit=True)
gorilla.apply(gorilla.Patch(keras.Model, 'fit', fit, settings=settings))
gorilla.apply(gorilla.Patch(keras.Model, 'fit_generator', fit_generator, settings=settings)) | -6,303,646,048,311,755,000 | Enables automatic logging from Keras to MLflow. Autologging captures the following information:
**Metrics** and **Parameters**
- Training loss; validation loss; user-specified metrics
- Metrics associated with the ``EarlyStopping`` callbacks: ``stopped_epoch``,
``restored_epoch``, ``restore_best_weights``, ``last_epoch``, etc

- ``fit()`` or ``fit_generator()`` parameters; optimizer name; learning rate; epsilon
- ``fit()`` or ``fit_generator()`` parameters associated with ``EarlyStopping``: ``min_delta``,
``patience``, ``baseline``, ``restore_best_weights``, etc
**Artifacts**
- Model summary on training start
- `MLflow Model <https://mlflow.org/docs/latest/models.html>`_ (Keras model) on training end
.. code-block:: python
:caption: Example
import mlflow
import mlflow.keras
# Build, compile, enable autologging, and train your model
keras_model = ...
keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])
# autolog your metrics, parameters, and model
mlflow.keras.autolog()
results = keras_model.fit(
x_train, y_train, epochs=20, batch_size=128, validation_data=(x_val, y_val))
``EarlyStopping Integration with Keras AutoLogging``
MLflow will detect if an ``EarlyStopping`` callback is used in a ``fit()`` or
``fit_generator()`` call, and if the ``restore_best_weights`` parameter is set to be ``True``,
then MLflow will log the metrics associated with the restored model as a final, extra step.
The epoch of the restored model will also be logged as the metric ``restored_epoch``.
This allows for easy comparison between the actual metrics of the restored model and
the metrics of other models.
If ``restore_best_weights`` is set to be ``False``, then MLflow will not log an additional step.
Regardless of ``restore_best_weights``, MLflow will also log ``stopped_epoch``,
which indicates the epoch at which training stopped due to early stopping.
If training does not end due to early stopping, then ``stopped_epoch`` will be logged as ``0``.
MLflow will also log the parameters of the ``EarlyStopping`` callback,
excluding ``mode`` and ``verbose``. | mlflow/keras.py | autolog | AnesBenmerzoug/mlflow | python | @experimental
def autolog():
'\n Enables automatic logging from Keras to MLflow. Autologging captures the following information:\n\n **Metrics** and **Parameters**\n - Training loss; validation loss; user-specified metrics\n - Metrics associated with the ``EarlyStopping`` callbacks: ``stopped_epoch``,\n ``restored_epoch``, ``restore_best_weight``, ``last_epoch``, etc\n - ``fit()`` or ``fit_generator()`` parameters; optimizer name; learning rate; epsilon\n - ``fit()`` or ``fit_generator()`` parameters associated with ``EarlyStopping``: ``min_delta``,\n ``patience``, ``baseline``, ``restore_best_weights``, etc\n **Artifacts**\n - Model summary on training start\n - `MLflow Model <https://mlflow.org/docs/latest/models.html>`_ (Keras model) on training end\n\n .. code-block:: python\n :caption: Example\n\n import mlflow\n import mlflow.keras\n # Build, compile, enable autologging, and train your model\n keras_model = ...\n keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])\n # autolog your metrics, parameters, and model\n mlflow.keras.autolog()\n results = keras_model.fit(\n x_train, y_train, epochs=20, batch_size=128, validation_data=(x_val, y_val))\n\n ``EarlyStopping Integration with Keras AutoLogging``\n\n MLflow will detect if an ``EarlyStopping`` callback is used in a ``fit()`` or\n ``fit_generator()`` call, and if the ``restore_best_weights`` parameter is set to be ``True``,\n then MLflow will log the metrics associated with the restored model as a final, extra step.\n The epoch of the restored model will also be logged as the metric ``restored_epoch``.\n This allows for easy comparison between the actual metrics of the restored model and\n the metrics of other models.\n\n If ``restore_best_weights`` is set to be ``False``, then MLflow will not log an additional step.\n\n Regardless of ``restore_best_weights``, MLflow will also log ``stopped_epoch``,\n which indicates the epoch at which training stopped due to early stopping.\n\n If training does not end due to early stopping, then ``stopped_epoch`` will be logged as ``0``.\n\n MLflow will also log the parameters of the ``EarlyStopping`` callback,\n excluding ``mode`` and ``verbose``.\n '
import keras
class __MLflowKerasCallback(keras.callbacks.Callback):
'\n Callback for auto-logging metrics and parameters.\n Records available logs after each epoch.\n Records model structural information as params when training begins\n '
def on_train_begin(self, logs=None):
try_mlflow_log(mlflow.log_param, 'num_layers', len(self.model.layers))
try_mlflow_log(mlflow.log_param, 'optimizer_name', type(self.model.optimizer).__name__)
if hasattr(self.model.optimizer, 'lr'):
lr = (self.model.optimizer.lr if (type(self.model.optimizer.lr) is float) else keras.backend.eval(self.model.optimizer.lr))
try_mlflow_log(mlflow.log_param, 'learning_rate', lr)
if hasattr(self.model.optimizer, 'epsilon'):
epsilon = (self.model.optimizer.epsilon if (type(self.model.optimizer.epsilon) is float) else keras.backend.eval(self.model.optimizer.epsilon))
try_mlflow_log(mlflow.log_param, 'epsilon', epsilon)
sum_list = []
self.model.summary(print_fn=sum_list.append)
summary = '\n'.join(sum_list)
tempdir = tempfile.mkdtemp()
try:
summary_file = os.path.join(tempdir, 'model_summary.txt')
with open(summary_file, 'w') as f:
f.write(summary)
try_mlflow_log(mlflow.log_artifact, local_path=summary_file)
finally:
shutil.rmtree(tempdir)
def on_epoch_end(self, epoch, logs=None):
if (not logs):
return
try_mlflow_log(mlflow.log_metrics, logs, step=epoch)
def on_train_end(self, logs=None):
try_mlflow_log(log_model, self.model, artifact_path='model')
def _implements_train_batch_hooks(self):
return False
def _implements_test_batch_hooks(self):
return False
def _implements_predict_batch_hooks(self):
return False
def _early_stop_check(callbacks):
if (LooseVersion(keras.__version__) < LooseVersion('2.3.0')):
es_callback = keras.callbacks.EarlyStopping
else:
es_callback = keras.callbacks.callbacks.EarlyStopping
for callback in callbacks:
if isinstance(callback, es_callback):
return callback
return None
def _log_early_stop_callback_params(callback):
if callback:
try:
earlystopping_params = {'monitor': callback.monitor, 'min_delta': callback.min_delta, 'patience': callback.patience, 'baseline': callback.baseline, 'restore_best_weights': callback.restore_best_weights}
try_mlflow_log(mlflow.log_params, earlystopping_params)
except Exception:
return
def _get_early_stop_callback_attrs(callback):
try:
return (callback.stopped_epoch, callback.restore_best_weights, callback.patience)
except Exception:
return None
def _log_early_stop_callback_metrics(callback, history):
if callback:
callback_attrs = _get_early_stop_callback_attrs(callback)
if (callback_attrs is None):
return
(stopped_epoch, restore_best_weights, patience) = callback_attrs
try_mlflow_log(mlflow.log_metric, 'stopped_epoch', stopped_epoch)
if ((stopped_epoch != 0) and restore_best_weights):
restored_epoch = (stopped_epoch - max(1, patience))
try_mlflow_log(mlflow.log_metric, 'restored_epoch', restored_epoch)
restored_metrics = {key: history.history[key][restored_epoch] for key in history.history.keys()}
metric_key = next(iter(history.history), None)
if (metric_key is not None):
last_epoch = len(history.history[metric_key])
try_mlflow_log(mlflow.log_metrics, restored_metrics, step=last_epoch)
def _run_and_log_function(self, original, args, kwargs, unlogged_params, callback_arg_index):
if (not mlflow.active_run()):
try_mlflow_log(mlflow.start_run)
auto_end_run = True
else:
auto_end_run = False
log_fn_args_as_params(original, args, kwargs, unlogged_params)
early_stop_callback = None
if (len(args) > callback_arg_index):
tmp_list = list(args)
early_stop_callback = _early_stop_check(tmp_list[callback_arg_index])
tmp_list[callback_arg_index] += [__MLflowKerasCallback()]
args = tuple(tmp_list)
elif ('callbacks' in kwargs):
early_stop_callback = _early_stop_check(kwargs['callbacks'])
kwargs['callbacks'] += [__MLflowKerasCallback()]
else:
kwargs['callbacks'] = [__MLflowKerasCallback()]
_log_early_stop_callback_params(early_stop_callback)
history = original(self, *args, **kwargs)
_log_early_stop_callback_metrics(early_stop_callback, history)
if auto_end_run:
try_mlflow_log(mlflow.end_run)
return history
@gorilla.patch(keras.Model)
def fit(self, *args, **kwargs):
original = gorilla.get_original_attribute(keras.Model, 'fit')
unlogged_params = ['self', 'x', 'y', 'callbacks', 'validation_data', 'verbose']
return _run_and_log_function(self, original, args, kwargs, unlogged_params, 5)
@gorilla.patch(keras.Model)
def fit_generator(self, *args, **kwargs):
original = gorilla.get_original_attribute(keras.Model, 'fit_generator')
unlogged_params = ['self', 'generator', 'callbacks', 'validation_data', 'verbose']
return _run_and_log_function(self, original, args, kwargs, unlogged_params, 4)
settings = gorilla.Settings(allow_hit=True, store_hit=True)
gorilla.apply(gorilla.Patch(keras.Model, 'fit', fit, settings=settings))
gorilla.apply(gorilla.Patch(keras.Model, 'fit_generator', fit_generator, settings=settings)) |
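A hedged sketch of the EarlyStopping integration described in the autolog docstring above; the model and data variables are assumptions for illustration.

# Illustrative sketch only: assumes a compiled `keras_model` and training arrays exist.
import mlflow.keras
from keras.callbacks import EarlyStopping

mlflow.keras.autolog()
early_stop = EarlyStopping(monitor="val_loss", patience=3, restore_best_weights=True)
history = keras_model.fit(
    x_train, y_train,
    epochs=50, batch_size=128,
    validation_data=(x_val, y_val),
    callbacks=[early_stop],   # stopped_epoch / restored_epoch get logged automatically
)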
def __init__(self, request_id=None, loadbalancer=None):
'UpdateLoadBalancerResponse - a model defined in huaweicloud sdk'
super(UpdateLoadBalancerResponse, self).__init__()
self._request_id = None
self._loadbalancer = None
self.discriminator = None
if (request_id is not None):
self.request_id = request_id
if (loadbalancer is not None):
self.loadbalancer = loadbalancer | -8,088,897,903,473,774,000 | UpdateLoadBalancerResponse - a model defined in huaweicloud sdk | huaweicloud-sdk-elb/huaweicloudsdkelb/v3/model/update_load_balancer_response.py | __init__ | JeffreyDin/huaweicloud-sdk-python-v3 | python | def __init__(self, request_id=None, loadbalancer=None):
super(UpdateLoadBalancerResponse, self).__init__()
self._request_id = None
self._loadbalancer = None
self.discriminator = None
if (request_id is not None):
self.request_id = request_id
if (loadbalancer is not None):
self.loadbalancer = loadbalancer |
@property
def request_id(self):
'Gets the request_id of this UpdateLoadBalancerResponse.\n\n 请求ID。 注:自动生成 。\n\n :return: The request_id of this UpdateLoadBalancerResponse.\n :rtype: str\n '
return self._request_id | -8,462,964,027,465,042,000 | Gets the request_id of this UpdateLoadBalancerResponse.
Request ID. Note: automatically generated.
:return: The request_id of this UpdateLoadBalancerResponse.
:rtype: str | huaweicloud-sdk-elb/huaweicloudsdkelb/v3/model/update_load_balancer_response.py | request_id | JeffreyDin/huaweicloud-sdk-python-v3 | python | @property
def request_id(self):
'Gets the request_id of this UpdateLoadBalancerResponse.\n\n 请求ID。 注:自动生成 。\n\n :return: The request_id of this UpdateLoadBalancerResponse.\n :rtype: str\n '
return self._request_id |
@request_id.setter
def request_id(self, request_id):
'Sets the request_id of this UpdateLoadBalancerResponse.\n\n 请求ID。 注:自动生成 。\n\n :param request_id: The request_id of this UpdateLoadBalancerResponse.\n :type: str\n '
self._request_id = request_id | 72,391,224,966,096,530 | Sets the request_id of this UpdateLoadBalancerResponse.
Request ID. Note: automatically generated.
:param request_id: The request_id of this UpdateLoadBalancerResponse.
:type: str | huaweicloud-sdk-elb/huaweicloudsdkelb/v3/model/update_load_balancer_response.py | request_id | JeffreyDin/huaweicloud-sdk-python-v3 | python | @request_id.setter
def request_id(self, request_id):
'Sets the request_id of this UpdateLoadBalancerResponse.\n\n 请求ID。 注:自动生成 。\n\n :param request_id: The request_id of this UpdateLoadBalancerResponse.\n :type: str\n '
self._request_id = request_id |
@property
def loadbalancer(self):
'Gets the loadbalancer of this UpdateLoadBalancerResponse.\n\n\n :return: The loadbalancer of this UpdateLoadBalancerResponse.\n :rtype: LoadBalancer\n '
return self._loadbalancer | 6,225,271,543,845,143,000 | Gets the loadbalancer of this UpdateLoadBalancerResponse.
:return: The loadbalancer of this UpdateLoadBalancerResponse.
:rtype: LoadBalancer | huaweicloud-sdk-elb/huaweicloudsdkelb/v3/model/update_load_balancer_response.py | loadbalancer | JeffreyDin/huaweicloud-sdk-python-v3 | python | @property
def loadbalancer(self):
'Gets the loadbalancer of this UpdateLoadBalancerResponse.\n\n\n :return: The loadbalancer of this UpdateLoadBalancerResponse.\n :rtype: LoadBalancer\n '
return self._loadbalancer |
@loadbalancer.setter
def loadbalancer(self, loadbalancer):
'Sets the loadbalancer of this UpdateLoadBalancerResponse.\n\n\n :param loadbalancer: The loadbalancer of this UpdateLoadBalancerResponse.\n :type: LoadBalancer\n '
self._loadbalancer = loadbalancer | -8,328,612,680,979,810,000 | Sets the loadbalancer of this UpdateLoadBalancerResponse.
:param loadbalancer: The loadbalancer of this UpdateLoadBalancerResponse.
:type: LoadBalancer | huaweicloud-sdk-elb/huaweicloudsdkelb/v3/model/update_load_balancer_response.py | loadbalancer | JeffreyDin/huaweicloud-sdk-python-v3 | python | @loadbalancer.setter
def loadbalancer(self, loadbalancer):
'Sets the loadbalancer of this UpdateLoadBalancerResponse.\n\n\n :param loadbalancer: The loadbalancer of this UpdateLoadBalancerResponse.\n :type: LoadBalancer\n '
self._loadbalancer = loadbalancer |
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
elif (attr in self.sensitive_list):
result[attr] = '****'
else:
result[attr] = value
return result | 2,594,216,033,120,720,000 | Returns the model properties as a dict | huaweicloud-sdk-elb/huaweicloudsdkelb/v3/model/update_load_balancer_response.py | to_dict | JeffreyDin/huaweicloud-sdk-python-v3 | python | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
elif (attr in self.sensitive_list):
result[attr] = '****'
else:
result[attr] = value
return result |
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | 5,849,158,643,760,736,000 | Returns the string representation of the model | huaweicloud-sdk-elb/huaweicloudsdkelb/v3/model/update_load_balancer_response.py | to_str | JeffreyDin/huaweicloud-sdk-python-v3 | python | def to_str(self):
return pprint.pformat(self.to_dict()) |
def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | -8,960,031,694,814,905,000 | For `print` and `pprint` | huaweicloud-sdk-elb/huaweicloudsdkelb/v3/model/update_load_balancer_response.py | __repr__ | JeffreyDin/huaweicloud-sdk-python-v3 | python | def __repr__(self):
return self.to_str() |
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, UpdateLoadBalancerResponse)):
return False
return (self.__dict__ == other.__dict__) | -2,017,557,468,176,576,300 | Returns true if both objects are equal | huaweicloud-sdk-elb/huaweicloudsdkelb/v3/model/update_load_balancer_response.py | __eq__ | JeffreyDin/huaweicloud-sdk-python-v3 | python | def __eq__(self, other):
if (not isinstance(other, UpdateLoadBalancerResponse)):
return False
return (self.__dict__ == other.__dict__) |
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other)) | 7,764,124,047,908,058,000 | Returns true if both objects are not equal | huaweicloud-sdk-elb/huaweicloudsdkelb/v3/model/update_load_balancer_response.py | __ne__ | JeffreyDin/huaweicloud-sdk-python-v3 | python | def __ne__(self, other):
return (not (self == other)) |
def get_regions():
'Return an array of the regions this account is active in. Ordered with us-east-1 in the front.'
ec2 = boto3.client('ec2')
response = ec2.describe_regions()
output = ['us-east-1']
for r in response['Regions']:
if (r['RegionName'] == 'us-east-1'):
continue
output.append(r['RegionName'])
return output | 605,186,307,391,275,400 | Return an array of the regions this account is active in. Ordered with us-east-1 in the front. | scripts/extract_findings_to_csv.py | get_regions | jchrisfarris/aws-macie-automations | python | def get_regions():
ec2 = boto3.client('ec2')
response = ec2.describe_regions()
output = ['us-east-1']
for r in response['Regions']:
if (r['RegionName'] == 'us-east-1'):
continue
output.append(r['RegionName'])
return output |
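A small, hedged sketch of how the region helper might be consumed; the choice of service client is illustrative only.

# Illustrative sketch only: assumes AWS credentials are available to boto3.
import boto3

for region in get_regions():
    print("active region:", region)                   # us-east-1 is always first
    client = boto3.client("ec2", region_name=region)  # example per-region client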
def _get_rc_timezone():
'\n Retrieve the preferred timeszone from the rcParams dictionary.\n '
s = matplotlib.rcParams['timezone']
if (s == 'UTC'):
return UTC
import pytz
return pytz.timezone(s) | 8,285,655,359,077,192,000 | Retrieve the preferred timezone from the rcParams dictionary.
'\n \n '
s = matplotlib.rcParams['timezone']
if (s == 'UTC'):
return UTC
import pytz
return pytz.timezone(s) |
def _to_ordinalf(dt):
'\n Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float\n days, preserving hours, minutes, seconds and microseconds. Return value\n is a :func:`float`.\n '
if (hasattr(dt, 'tzinfo') and (dt.tzinfo is not None)):
delta = dt.tzinfo.utcoffset(dt)
if (delta is not None):
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds((dt - rdt))
if (td_remainder > 0):
base += (td_remainder / SEC_PER_DAY)
return base | -4,235,236,962,482,482,700 | Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`. | env/lib/python2.7/site-packages/matplotlib/dates.py | _to_ordinalf | rbalda/neural_ocr | python | def _to_ordinalf(dt):
'\n Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float\n days, preserving hours, minutes, seconds and microseconds. Return value\n is a :func:`float`.\n '
if (hasattr(dt, 'tzinfo') and (dt.tzinfo is not None)):
delta = dt.tzinfo.utcoffset(dt)
if (delta is not None):
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds((dt - rdt))
if (td_remainder > 0):
base += (td_remainder / SEC_PER_DAY)
return base |
def _from_ordinalf(x, tz=None):
"\n Convert Gregorian float of the date, preserving hours, minutes,\n seconds and microseconds. Return value is a :class:`datetime`.\n\n The input date `x` is a float in ordinal days at UTC, and the output will\n be the specified :class:`datetime` object corresponding to that time in\n timezone `tz`, or if `tz` is `None`, in the timezone specified in\n `rcParams['timezone']`.\n "
if (tz is None):
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = (float(x) - ix)
dt += datetime.timedelta(microseconds=int((remainder * MUSECONDS_PER_DAY)))
if (dt.microsecond < 10):
dt = dt.replace(microsecond=0)
elif (dt.microsecond > 999990):
dt += datetime.timedelta(microseconds=(1000000.0 - dt.microsecond))
return dt.astimezone(tz) | 6,921,709,850,315,226,000 | Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`. | env/lib/python2.7/site-packages/matplotlib/dates.py | _from_ordinalf | rbalda/neural_ocr | python | def _from_ordinalf(x, tz=None):
"\n Convert Gregorian float of the date, preserving hours, minutes,\n seconds and microseconds. Return value is a :class:`datetime`.\n\n The input date `x` is a float in ordinal days at UTC, and the output will\n be the specified :class:`datetime` object corresponding to that time in\n timezone `tz`, or if `tz` is `None`, in the timezone specified in\n `rcParams['timezone']`.\n "
if (tz is None):
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = (float(x) - ix)
dt += datetime.timedelta(microseconds=int((remainder * MUSECONDS_PER_DAY)))
if (dt.microsecond < 10):
dt = dt.replace(microsecond=0)
elif (dt.microsecond > 999990):
dt += datetime.timedelta(microseconds=(1000000.0 - dt.microsecond))
return dt.astimezone(tz) |
def datestr2num(d, default=None):
'\n Convert a date string to a datenum using\n :func:`dateutil.parser.parse`.\n\n Parameters\n ----------\n d : string or sequence of strings\n The dates to convert.\n\n default : datetime instance\n The default date to use when fields are missing in `d`.\n '
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if (default is not None):
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if (not d.size):
return d
return date2num(_dateutil_parser_parse_np_vectorized(d)) | 3,791,047,595,217,189,000 | Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`. | env/lib/python2.7/site-packages/matplotlib/dates.py | datestr2num | rbalda/neural_ocr | python | def datestr2num(d, default=None):
'\n Convert a date string to a datenum using\n :func:`dateutil.parser.parse`.\n\n Parameters\n ----------\n d : string or sequence of strings\n The dates to convert.\n\n default : datetime instance\n The default date to use when fields are missing in `d`.\n '
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if (default is not None):
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if (not d.size):
return d
return date2num(_dateutil_parser_parse_np_vectorized(d)) |
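A hedged sketch of datestr2num with and without the default argument.

# Illustrative sketch only.
import datetime
from matplotlib.dates import datestr2num

single = datestr2num("2003-09-25")
several = datestr2num(["2003-09-25 10:36", "25 Sep 2003"],
                      default=datetime.datetime(2003, 1, 1))  # fills fields missing from the strings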
def date2num(d):
'\n *d* is either a :class:`datetime` instance or a sequence of datetimes.\n\n Return value is a floating point number (or sequence of floats)\n which gives the number of days (fraction part represents hours,\n minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.\n The addition of one here is a historical artifact. Also, note\n that the Gregorian calendar is assumed; this is not universal\n practice. For details, see the module docstring.\n '
if (not cbook.iterable(d)):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if (not d.size):
return d
return _to_ordinalf_np_vectorized(d) | 2,110,665,873,702,849,000 | *d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring. | env/lib/python2.7/site-packages/matplotlib/dates.py | date2num | rbalda/neural_ocr | python | def date2num(d):
'\n *d* is either a :class:`datetime` instance or a sequence of datetimes.\n\n Return value is a floating point number (or sequence of floats)\n which gives the number of days (fraction part represents hours,\n minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.\n The addition of one here is a historical artifact. Also, note\n that the Gregorian calendar is assumed; this is not universal\n practice. For details, see the module docstring.\n '
if (not cbook.iterable(d)):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if (not d.size):
return d
return _to_ordinalf_np_vectorized(d) |
def julian2num(j):
'\n Convert a Julian date (or sequence) to a matplotlib date (or sequence).\n '
if cbook.iterable(j):
j = np.asarray(j)
return (j - JULIAN_OFFSET) | 6,728,965,020,165,259,000 | Convert a Julian date (or sequence) to a matplotlib date (or sequence). | env/lib/python2.7/site-packages/matplotlib/dates.py | julian2num | rbalda/neural_ocr | python | def julian2num(j):
'\n \n '
if cbook.iterable(j):
j = np.asarray(j)
return (j - JULIAN_OFFSET) |
def num2julian(n):
'\n Convert a matplotlib date (or sequence) to a Julian date (or sequence).\n '
if cbook.iterable(n):
n = np.asarray(n)
return (n + JULIAN_OFFSET) | -5,886,472,614,472,625,000 | Convert a matplotlib date (or sequence) to a Julian date (or sequence). | env/lib/python2.7/site-packages/matplotlib/dates.py | num2julian | rbalda/neural_ocr | python | def num2julian(n):
'\n \n '
if cbook.iterable(n):
n = np.asarray(n)
return (n + JULIAN_OFFSET) |
def num2date(x, tz=None):
'\n *x* is a float value which gives the number of days\n (fraction part represents hours, minutes, seconds) since\n 0001-01-01 00:00:00 UTC *plus* *one*.\n The addition of one here is a historical artifact. Also, note\n that the Gregorian calendar is assumed; this is not universal\n practice. For details, see the module docstring.\n\n Return value is a :class:`datetime` instance in timezone *tz* (default to\n rcparams TZ value).\n\n If *x* is a sequence, a sequence of :class:`datetime` objects will\n be returned.\n '
if (tz is None):
tz = _get_rc_timezone()
if (not cbook.iterable(x)):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if (not x.size):
return x
return _from_ordinalf_np_vectorized(x, tz).tolist() | -4,338,585,028,737,360,400 | *x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned. | env/lib/python2.7/site-packages/matplotlib/dates.py | num2date | rbalda/neural_ocr | python | def num2date(x, tz=None):
'\n *x* is a float value which gives the number of days\n (fraction part represents hours, minutes, seconds) since\n 0001-01-01 00:00:00 UTC *plus* *one*.\n The addition of one here is a historical artifact. Also, note\n that the Gregorian calendar is assumed; this is not universal\n practice. For details, see the module docstring.\n\n Return value is a :class:`datetime` instance in timezone *tz* (default to\n rcparams TZ value).\n\n If *x* is a sequence, a sequence of :class:`datetime` objects will\n be returned.\n '
if (tz is None):
tz = _get_rc_timezone()
if (not cbook.iterable(x)):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if (not x.size):
return x
return _from_ordinalf_np_vectorized(x, tz).tolist() |
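A hedged round-trip sketch combining date2num and num2date from the rows above.

# Illustrative sketch only: round-trip between datetimes and Matplotlib date numbers.
import datetime
from matplotlib.dates import date2num, num2date

dt = datetime.datetime(2010, 5, 1, 12, 30)
x = date2num(dt)       # float days since 0001-01-01 UTC, plus one
dt_back = num2date(x)  # timezone-aware datetime (rcParams['timezone'] by default)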
def drange(dstart, dend, delta):
'\n Return a date range as float Gregorian ordinals. *dstart* and\n *dend* are :class:`datetime` instances. *delta* is a\n :class:`datetime.timedelta` instance.\n '
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = (_total_seconds(delta) / SEC_PER_DAY)
num = int(np.ceil(((f2 - f1) / step)))
dinterval_end = (dstart + (num * delta))
if (dinterval_end >= dend):
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end)
return np.linspace(f1, f2, (num + 1)) | 2,959,573,575,103,759,400 | Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance. | env/lib/python2.7/site-packages/matplotlib/dates.py | drange | rbalda/neural_ocr | python | def drange(dstart, dend, delta):
'\n Return a date range as float Gregorian ordinals. *dstart* and\n *dend* are :class:`datetime` instances. *delta* is a\n :class:`datetime.timedelta` instance.\n '
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = (_total_seconds(delta) / SEC_PER_DAY)
num = int(np.ceil(((f2 - f1) / step)))
dinterval_end = (dstart + (num * delta))
if (dinterval_end >= dend):
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end)
return np.linspace(f1, f2, (num + 1)) |
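A hedged sketch of drange producing evenly spaced date ordinals.

# Illustrative sketch only: one week of ordinals at a 6-hour step.
import datetime
from matplotlib.dates import drange, num2date

start = datetime.datetime(2010, 1, 1)
end = datetime.datetime(2010, 1, 8)
step = datetime.timedelta(hours=6)
xs = drange(start, end, step)   # float ordinals; the end point is excluded
dates = num2date(xs)            # convert back to datetimes if needed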
def _close_to_dt(d1, d2, epsilon=5):
'\n Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.\n '
delta = (d2 - d1)
mus = abs((_total_seconds(delta) * 1000000.0))
assert (mus < epsilon) | -2,725,794,435,686,897,000 | Assert that datetimes *d1* and *d2* are within *epsilon* microseconds. | env/lib/python2.7/site-packages/matplotlib/dates.py | _close_to_dt | rbalda/neural_ocr | python | def _close_to_dt(d1, d2, epsilon=5):
'\n \n '
delta = (d2 - d1)
mus = abs((_total_seconds(delta) * 1000000.0))
assert (mus < epsilon) |
def _close_to_num(o1, o2, epsilon=5):
'\n Assert that float ordinals *o1* and *o2* are within *epsilon*\n microseconds.\n '
delta = abs(((o2 - o1) * MUSECONDS_PER_DAY))
assert (delta < epsilon) | -7,665,723,005,057,277,000 | Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds. | env/lib/python2.7/site-packages/matplotlib/dates.py | _close_to_num | rbalda/neural_ocr | python | def _close_to_num(o1, o2, epsilon=5):
'\n Assert that float ordinals *o1* and *o2* are within *epsilon*\n microseconds.\n '
delta = abs(((o2 - o1) * MUSECONDS_PER_DAY))
assert (delta < epsilon) |
def epoch2num(e):
'\n Convert an epoch or sequence of epochs to the new date format,\n that is days since 0001.\n '
return (EPOCH_OFFSET + (np.asarray(e) / SEC_PER_DAY)) | -4,557,584,763,846,818,000 | Convert an epoch or sequence of epochs to the new date format,
that is days since 0001. | env/lib/python2.7/site-packages/matplotlib/dates.py | epoch2num | rbalda/neural_ocr | python | def epoch2num(e):
'\n Convert an epoch or sequence of epochs to the new date format,\n that is days since 0001.\n '
return (EPOCH_OFFSET + (np.asarray(e) / SEC_PER_DAY)) |
def num2epoch(d):
'\n Convert days since 0001 to epoch. *d* can be a number or sequence.\n '
return ((np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY) | 3,946,712,163,784,859,600 | Convert days since 0001 to epoch. *d* can be a number or sequence. | env/lib/python2.7/site-packages/matplotlib/dates.py | num2epoch | rbalda/neural_ocr | python | def num2epoch(d):
'\n \n '
return ((np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY) |
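A hedged sketch pairing epoch2num and num2epoch.

# Illustrative sketch only: Unix epoch seconds <-> days-since-0001 ordinals.
import time
from matplotlib.dates import epoch2num, num2epoch

now_num = epoch2num(time.time())   # epoch seconds -> Matplotlib date number
now_sec = num2epoch(now_num)       # and back to epoch seconds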
def mx2num(mxdates):
'\n Convert mx :class:`datetime` instance (or sequence of mx\n instances) to the new date format.\n '
scalar = False
if (not cbook.iterable(mxdates)):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret | -492,982,039,525,289,340 | Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format. | env/lib/python2.7/site-packages/matplotlib/dates.py | mx2num | rbalda/neural_ocr | python | def mx2num(mxdates):
'\n Convert mx :class:`datetime` instance (or sequence of mx\n instances) to the new date format.\n '
scalar = False
if (not cbook.iterable(mxdates)):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret |
def date_ticker_factory(span, tz=None, numticks=5):
'\n Create a date locator with *numticks* (approx) and a date formatter\n for *span* in days. Return value is (locator, formatter).\n '
if (span == 0):
span = (1 / HOURS_PER_DAY)
mins = (span * MINUTES_PER_DAY)
hrs = (span * HOURS_PER_DAY)
days = span
wks = (span / DAYS_PER_WEEK)
months = (span / DAYS_PER_MONTH)
years = (span / DAYS_PER_YEAR)
if (years > numticks):
locator = YearLocator(int((years / numticks)), tz=tz)
fmt = '%Y'
elif (months > numticks):
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif (wks > numticks):
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif (days > numticks):
locator = DayLocator(interval=int(math.ceil((days / numticks))), tz=tz)
fmt = '%b %d'
elif (hrs > numticks):
locator = HourLocator(interval=int(math.ceil((hrs / numticks))), tz=tz)
fmt = '%H:%M\n%b %d'
elif (mins > numticks):
locator = MinuteLocator(interval=int(math.ceil((mins / numticks))), tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return (locator, formatter) | -7,755,151,327,285,663,000 | Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter). | env/lib/python2.7/site-packages/matplotlib/dates.py | date_ticker_factory | rbalda/neural_ocr | python | def date_ticker_factory(span, tz=None, numticks=5):
'\n Create a date locator with *numticks* (approx) and a date formatter\n for *span* in days. Return value is (locator, formatter).\n '
if (span == 0):
span = (1 / HOURS_PER_DAY)
mins = (span * MINUTES_PER_DAY)
hrs = (span * HOURS_PER_DAY)
days = span
wks = (span / DAYS_PER_WEEK)
months = (span / DAYS_PER_MONTH)
years = (span / DAYS_PER_YEAR)
if (years > numticks):
locator = YearLocator(int((years / numticks)), tz=tz)
fmt = '%Y'
elif (months > numticks):
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif (wks > numticks):
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif (days > numticks):
locator = DayLocator(interval=int(math.ceil((days / numticks))), tz=tz)
fmt = '%b %d'
elif (hrs > numticks):
locator = HourLocator(interval=int(math.ceil((hrs / numticks))), tz=tz)
fmt = '%H:%M\n%b %d'
elif (mins > numticks):
locator = MinuteLocator(interval=int(math.ceil((mins / numticks))), tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return (locator, formatter) |
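A hedged sketch of wiring the returned locator and formatter onto an axis; the span value is an assumption for illustration.

# Illustrative sketch only: span_days is a made-up value for the plotted range.
import matplotlib.pyplot as plt
from matplotlib.dates import date_ticker_factory

fig, ax = plt.subplots()
span_days = 45
locator, formatter = date_ticker_factory(span_days, numticks=5)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formatter)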