# Dataset of Python functions with docstrings, extracted from open-source repositories.
# Columns: body (code with docstring) · docstring · path · name · repository_name · lang (python).

# src/sqlfluff/cli/commands.py — core_options (tmastny/sqlfluff)
def core_options(f):
    """Add core operation options to commands via a decorator.

    These are applied to the main (but not all) cli commands like
    `parse`, `lint` and `fix`.
    """
    f = click.option('--dialect', default=None, help='The dialect of SQL to lint (default=ansi)')(f)
    f = click.option('--templater', default=None, help='The templater to use (default=jinja)')(f)
    f = click.option('--rules', default=None, help='Narrow the search to only specific rules. For example specifying `--rules L001` will only search for rule `L001` (Unnecessary trailing whitespace). Multiple rules can be specified with commas e.g. `--rules L001,L002` will specify only looking for violations of rule `L001` and rule `L002`.')(f)
    f = click.option('--exclude-rules', default=None, help='Exclude specific rules. For example specifying `--exclude-rules L001` will remove rule `L001` (Unnecessary trailing whitespace) from the set of considered rules. This could either be the whitelist, or the general set if there is no specific whitelist. Multiple rules can be specified with commas e.g. `--exclude-rules L001,L002` will exclude violations of rule `L001` and rule `L002`.')(f)
    f = click.option('--ignore', default=None, help="Ignore particular families of errors so that they don't cause a failed run. For example `--ignore parsing` would mean that any parsing errors are ignored and don't influence the success or fail of a run. Multiple options are possible if comma separated e.g. `--ignore parsing,templating`.")(f)
    f = click.option('--bench', is_flag=True, help='Set this flag to engage the benchmarking tool output.')(f)
    f = click.option('--logger', type=click.Choice(['parser', 'linter', 'rules'], case_sensitive=False), help='Choose to limit the logging to one of the loggers.')(f)
    return f
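
As a quick illustration of the pattern (a minimal sketch, not part of sqlfluff — the `shared_options` decorator and `greet` command here are hypothetical): each `click.option(...)` call returns a decorator, so stacking them inside a function lets several commands share one option set.

```python
import click

def shared_options(f):
    # Each click.option(...) returns a decorator; apply it to f and return the result.
    f = click.option('--verbose', is_flag=True, help='Emit extra output.')(f)
    return f

@click.command()
@shared_options
def greet(verbose):
    click.echo('hello' + (' (verbose)' if verbose else ''))
```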

# src/sqlfluff/cli/commands.py — get_config (tmastny/sqlfluff)
def get_config(**kwargs):
    """Get a config object from kwargs."""
    if kwargs.get('dialect', None):
        try:
            # Check the requested dialect exists before building the config.
            dialect_selector(kwargs['dialect'])
        except KeyError:
            click.echo('Error: Unknown dialect {0!r}'.format(kwargs['dialect']))
            sys.exit(66)
    # Instantiate a config object, ignoring any unset (None) CLI values.
    overrides = {k: kwargs[k] for k in kwargs if kwargs[k] is not None}
    return FluffConfig.from_root(overrides=overrides)
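
The dict comprehension in `get_config` drops unset CLI flags so they don't override file-based configuration; a small illustration with hypothetical values:

```python
kwargs = {'dialect': 'mysql', 'templater': None, 'rules': 'L001,L002'}
overrides = {k: v for k, v in kwargs.items() if v is not None}
assert overrides == {'dialect': 'mysql', 'rules': 'L001,L002'}  # templater stays file-configured
```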

# src/sqlfluff/cli/commands.py — get_linter_and_formatter (tmastny/sqlfluff)
def get_linter_and_formatter(cfg, silent=False):
    """Get a linter object given a config."""
    try:
        # We're just making sure it exists at this stage - it will be fetched properly in the linter.
        dialect_selector(cfg.get('dialect'))
    except KeyError:
        click.echo('Error: Unknown dialect {0!r}'.format(cfg.get('dialect')))
        sys.exit(66)
    if not silent:
        # Instantiate the linter and return it (with an echoing formatter).
        formatter = CallbackFormatter(callback=(lambda m: click.echo(m, color=cfg.get('color'))), verbosity=cfg.get('verbose'), output_line_length=cfg.get('output_line_length'))
        return (Linter(config=cfg, formatter=formatter), formatter)
    else:
        # Instantiate the linter and return it (with a silent formatter).
        formatter = CallbackFormatter(callback=(lambda m: None), verbosity=0)
        return (Linter(config=cfg), formatter)

# src/sqlfluff/cli/commands.py — cli (tmastny/sqlfluff)
@click.group()
@click.version_option()
def cli():
    """Sqlfluff is a modular sql linter for humans."""

# src/sqlfluff/cli/commands.py — version (tmastny/sqlfluff)
@cli.command()
@common_options
def version(**kwargs):
    """Show the version of sqlfluff."""
    c = get_config(**kwargs)
    if c.get('verbose') > 0:
        # Instantiate the linter and print the version information with the config.
        (lnt, formatter) = get_linter_and_formatter(c)
        formatter.dispatch_config(lnt)
    else:
        click.echo(get_package_version(), color=c.get('color'))

# src/sqlfluff/cli/commands.py — rules (tmastny/sqlfluff)
@cli.command()
@common_options
def rules(**kwargs):
    """Show the current rules in use."""
    c = get_config(**kwargs)
    (lnt, _) = get_linter_and_formatter(c)
    click.echo(format_rules(lnt), color=c.get('color'))

# src/sqlfluff/cli/commands.py — dialects (tmastny/sqlfluff)
@cli.command()
@common_options
def dialects(**kwargs):
    """Show the current dialects available."""
    c = get_config(**kwargs)
    click.echo(format_dialects(dialect_readout), color=c.get('color'))

# src/sqlfluff/cli/commands.py — lint (tmastny/sqlfluff)
@cli.command()
@common_options
@core_options
@click.option('-f', '--format', 'format', default='human', type=click.Choice(['human', 'json', 'yaml'], case_sensitive=False), help='What format to return the lint result in.')
@click.option('--nofail', is_flag=True, help='If set, the exit code will always be zero, regardless of violations found. This is potentially useful during rollout.')
@click.option('--disregard-sqlfluffignores', is_flag=True, help='Perform the operation regardless of .sqlfluffignore configurations')
@click.option('-p', '--parallel', type=int, default=1, help='If set to a value higher than 1, run SQLFluff in parallel, speeding up processing.')
@click.argument('paths', nargs=-1)
def lint(paths, parallel, format, nofail, disregard_sqlfluffignores, logger=None, bench=False, **kwargs):
    """Lint SQL files via passing a list of files or using stdin.

    PATH is the path to a sql file or directory to lint. This can be either a
    file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-')
    character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will
    be interpreted like passing the current working directory as a path argument.

    Linting SQL files:

        sqlfluff lint path/to/file.sql
        sqlfluff lint directory/of/sql/files

    Linting a file via stdin (note the lone '-' character):

        cat path/to/file.sql | sqlfluff lint -
        echo 'select col from tbl' | sqlfluff lint -
    """
    c = get_config(**kwargs)
    non_human_output = format in ('json', 'yaml')
    (lnt, formatter) = get_linter_and_formatter(c, silent=non_human_output)
    verbose = c.get('verbose')
    formatter.dispatch_config(lnt)
    set_logging_level(verbosity=verbose, logger=logger, stderr_output=non_human_output)
    if ('-',) == paths:
        # Lint whatever arrives on stdin.
        result = lnt.lint_string_wrapped(sys.stdin.read(), fname='stdin')
    else:
        # Output the results as we go.
        if verbose >= 1:
            click.echo(format_linting_result_header())
        try:
            result = lnt.lint_paths(paths, ignore_non_existent_files=False, ignore_files=(not disregard_sqlfluffignores), parallel=parallel)
        except IOError:
            click.echo(colorize('The path(s) {0!r} could not be accessed. Check it/they exist(s).'.format(paths), 'red'))
            sys.exit(1)
        # Output the final stats.
        if verbose >= 1:
            click.echo(format_linting_stats(result, verbose=verbose))
    if format == 'json':
        click.echo(json.dumps(result.as_records()))
    elif format == 'yaml':
        click.echo(yaml.dump(result.as_records()))
    if bench:
        click.echo('==== overall timings ====')
        timing_summary = result.timing_summary()
        for step in timing_summary:
            click.echo(f'=== {step} ===')
            click.echo(cli_table(timing_summary[step].items()))
    if not nofail:
        if not non_human_output:
            click.echo('All Finished 📜 🎉!')
        sys.exit(result.stats()['exit code'])
    else:
        sys.exit(0)

# src/sqlfluff/cli/commands.py — do_fixes (tmastny/sqlfluff)
def do_fixes(lnt, result, formatter=None, **kwargs):
    """Actually do the fixes."""
    click.echo('Persisting Changes...')
    res = result.persist_changes(formatter=formatter, **kwargs)
    if all(res.values()):
        click.echo('Done. Please check your files to confirm.')
        return True
    # If some failed then return false.
    click.echo('Done. Some operations failed. Please check your files to confirm.')
    click.echo('Some errors cannot be fixed or there is another error blocking it.')
    return False

# src/sqlfluff/cli/commands.py — fix (tmastny/sqlfluff)
@cli.command()
@common_options
@core_options
@click.option('-f', '--force', is_flag=True, help='skip the confirmation prompt and go straight to applying fixes. **Use this with caution.**')
@click.option('--fixed-suffix', default=None, help='An optional suffix to add to fixed files.')
@click.option('--parallel', type=int, default=1, help='If set to a value higher than 1, run SQLFluff in parallel, speeding up processing.')
@click.argument('paths', nargs=-1)
def fix(force, paths, parallel, bench=False, fixed_suffix='', logger=None, **kwargs):
    """Fix SQL files.

    PATH is the path to a sql file or directory to lint. This can be either a
    file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-')
    character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will
    be interpreted like passing the current working directory as a path argument.
    """
    fixing_stdin = ('-',) == paths
    c = get_config(**kwargs)
    (lnt, formatter) = get_linter_and_formatter(c, silent=fixing_stdin)
    verbose = c.get('verbose')
    formatter.dispatch_config(lnt)
    set_logging_level(verbosity=verbose, logger=logger, stderr_output=fixing_stdin)
    # Handle fixing from stdin: write the fixed SQL to stdout and exit.
    if fixing_stdin:
        stdin = sys.stdin.read()
        result = lnt.lint_string_wrapped(stdin, fname='stdin', fix=True)
        stdout = result.paths[0].files[0].fix_string()[0]
        click.echo(stdout, nl=False)
        sys.exit()
    click.echo('==== finding fixable violations ====')
    try:
        result = lnt.lint_paths(paths, fix=True, ignore_non_existent_files=False, parallel=parallel)
    except IOError:
        click.echo(colorize('The path(s) {0!r} could not be accessed. Check it/they exist(s).'.format(paths), 'red'))
        sys.exit(1)
    if result.num_violations(types=SQLLintError, fixable=True) > 0:
        click.echo('==== fixing violations ====')
        click.echo('{0} fixable linting violations found'.format(result.num_violations(types=SQLLintError, fixable=True)))
        if force:
            click.echo(colorize('FORCE MODE', 'red') + ': Attempting fixes...')
            success = do_fixes(lnt, result, formatter, types=SQLLintError, fixed_file_suffix=fixed_suffix)
            if not success:
                sys.exit(1)
        else:
            click.echo('Are you sure you wish to attempt to fix these? [Y/n] ', nl=False)
            c = click.getchar().lower()
            click.echo('...')
            if c in ('y', '\r', '\n'):
                click.echo('Attempting fixes...')
                success = do_fixes(lnt, result, formatter, types=SQLLintError, fixed_file_suffix=fixed_suffix)
                if not success:
                    sys.exit(1)
                else:
                    click.echo('All Finished 📜 🎉!')
            elif c == 'n':
                click.echo('Aborting...')
            else:
                click.echo("Invalid input, please enter 'Y' or 'N'")
                click.echo('Aborting...')
    else:
        click.echo('==== no fixable linting violations found ====')
        if result.num_violations(types=SQLLintError, fixable=False) > 0:
            click.echo(' [{0} unfixable linting violations found]'.format(result.num_violations(types=SQLLintError, fixable=False)))
        click.echo('All Finished 📜 🎉!')
    if bench:
        click.echo('==== overall timings ====')
        timing_summary = result.timing_summary()
        for step in timing_summary:
            click.echo(f'=== {step} ===')
            click.echo(cli_table(timing_summary[step].items()))
    sys.exit(0)

# src/sqlfluff/cli/commands.py — quoted_presenter (tmastny/sqlfluff)
def quoted_presenter(dumper, data):
    """Re-presenter which always double quotes string values needing escapes."""
    if ('\n' in data) or ('\t' in data) or ("'" in data):
        return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='"')
    else:
        return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='')
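
The presenter only takes effect once registered with PyYAML (the `parse` command below does exactly this); a minimal standalone sketch:

```python
import yaml

yaml.add_representer(str, quoted_presenter)
# Strings containing quotes, tabs or newlines are now emitted double-quoted.
print(yaml.dump({'sql': "select 'a' from tbl"}))
```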

# src/sqlfluff/cli/commands.py — parse (tmastny/sqlfluff)
@cli.command()
@common_options
@core_options
@click.argument('path', nargs=1)
@click.option('--recurse', default=0, help='The depth to recursively parse to (0 for unlimited)')
@click.option('-c', '--code-only', is_flag=True, help='Output only the code elements of the parse tree.')
@click.option('-f', '--format', default='human', type=click.Choice(['human', 'json', 'yaml'], case_sensitive=False), help='What format to return the parse result in.')
@click.option('--profiler', is_flag=True, help='Set this flag to engage the python profiler.')
@click.option('--nofail', is_flag=True, help='If set, the exit code will always be zero, regardless of violations found. This is potentially useful during rollout.')
def parse(path, code_only, format, profiler, bench, nofail, logger=None, **kwargs):
    """Parse SQL files and just spit out the result.

    PATH is the path to a sql file or directory to lint. This can be either a
    file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-')
    character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will
    be interpreted like passing the current working directory as a path argument.
    """
    c = get_config(**kwargs)
    # We don't want anything else to be logged if we want json or yaml output.
    non_human_output = format in ('json', 'yaml')
    (lnt, formatter) = get_linter_and_formatter(c, silent=non_human_output)
    verbose = c.get('verbose')
    recurse = c.get('recurse')
    formatter.dispatch_config(lnt)
    set_logging_level(verbosity=verbose, logger=logger, stderr_output=non_human_output)
    nv = 0
    if profiler:
        # Set up the profiler if required.
        try:
            import cProfile
        except ImportError:
            click.echo('The cProfiler is not available on your platform.')
            sys.exit(1)
        pr = cProfile.Profile()
        pr.enable()
    try:
        # Handle stdin if specified via lone '-'.
        if '-' == path:
            result = [lnt.parse_string(sys.stdin.read(), 'stdin', recurse=recurse, config=lnt.config)]
        else:
            result = lnt.parse_path(path, recurse=recurse)
        if format == 'human':
            # Iterative print for human readout.
            timing = TimingSummary()
            for parsed_string in result:
                timing.add(parsed_string.time_dict)
                if parsed_string.tree:
                    click.echo(parsed_string.tree.stringify(code_only=code_only))
                else:
                    click.echo('...Failed to Parse...')
                nv += len(parsed_string.violations)
                if parsed_string.violations:
                    click.echo('==== parsing violations ====')
                for v in parsed_string.violations:
                    click.echo(format_violation(v))
                if parsed_string.violations and (parsed_string.config.get('dialect') == 'ansi'):
                    click.echo(format_dialect_warning())
                if verbose >= 2:
                    click.echo('==== timings ====')
                    click.echo(cli_table(parsed_string.time_dict.items()))
            if (verbose >= 2) or bench:
                click.echo('==== overall timings ====')
                timing_summary = timing.summary()
                for step in timing_summary:
                    click.echo(f'=== {step} ===')
                    click.echo(cli_table(timing_summary[step].items()))
        else:
            filepaths = ['stdin'] if ('-' == path) else lnt.paths_from_path(path)
            result = [dict(filepath=filepath, segments=(parsed.as_record(code_only=code_only, show_raw=True) if parsed else None)) for (filepath, (parsed, _, _, _, _)) in zip(filepaths, result)]
            if format == 'yaml':
                # For yaml output, dump double-quoted strings for values needing escapes.
                yaml.add_representer(str, quoted_presenter)
                click.echo(yaml.dump(result))
            elif format == 'json':
                click.echo(json.dumps(result))
    except IOError:
        click.echo(colorize('The path {0!r} could not be accessed. Check it exists.'.format(path), 'red'))
        sys.exit(1)
    if profiler:
        pr.disable()
        profiler_buffer = StringIO()
        ps = pstats.Stats(pr, stream=profiler_buffer).sort_stats('cumulative')
        ps.print_stats()
        click.echo('==== profiler stats ====')
        click.echo('\n'.join(profiler_buffer.getvalue().split('\n')[:50]))
    if (nv > 0) and (not nofail):
        sys.exit(66)
    else:
        sys.exit(0)

# src/sqlfluff/cli/commands.py — filter (tmastny/sqlfluff)
def filter(self, record):
    """Filter any warnings (or above) to turn them red."""
    if record.levelno >= logging.WARNING:
        record.msg = colorize(record.msg, 'red') + ' '
    return True
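
This `filter` is a method of a `logging.Filter` subclass whose class definition is not included in this excerpt; a minimal sketch of how such a filter is attached to a handler, reusing the module's `colorize` helper (the class name here is assumed, not from the source):

```python
import logging

class RedWarningsFilter(logging.Filter):  # hypothetical name for the enclosing class
    def filter(self, record):
        if record.levelno >= logging.WARNING:
            record.msg = colorize(record.msg, 'red') + ' '
        return True

handler = logging.StreamHandler()
handler.addFilter(RedWarningsFilter())
```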

# pennylane/devices/tests/test_gates.py — test_supported_gates_can_be_implemented (AlaricCheng/pennylane)
@pytest.mark.parametrize('operation', all_ops)
def test_supported_gates_can_be_implemented(self, device_kwargs, operation):
    """Test that the device can implement all its supported gates."""
    device_kwargs['wires'] = 4
    dev = qml.device(**device_kwargs)
    assert hasattr(dev, 'operations')
    if operation in dev.operations:

        @qml.qnode(dev)
        def circuit():
            ops[operation]
            return qml.expval(qml.Identity(wires=0))

        assert isinstance(circuit(), (float, np.ndarray))
def test_supported_gates_can_be_implemented(self, device_kwargs, operation):
device_kwargs['wires'] = 4
dev = qml.device(**device_kwargs)
assert hasattr(dev, 'operations')
if (operation in dev.operations):
@qml.qnode(dev)
def circuit():
ops[operation]
return qml.expval(qml.Identity(wires=0))
assert isinstance(circuit(), (float, np.ndarray)) |

# pennylane/devices/tests/test_gates.py — test_inverse_gates_can_be_implemented (AlaricCheng/pennylane)
@pytest.mark.parametrize('operation', all_ops)
def test_inverse_gates_can_be_implemented(self, device_kwargs, operation):
    """Test that the device can implement the inverse of all its supported gates.

    This test is skipped for devices that do not support inverse operations.
    """
    device_kwargs['wires'] = 4
    dev = qml.device(**device_kwargs)
    supports_inv = ('supports_inverse_operations' in dev.capabilities()) and dev.capabilities()['supports_inverse_operations']
    if not supports_inv:
        pytest.skip('Device does not support inverse operations.')
    assert hasattr(dev, 'operations')
    if operation in dev.operations:

        @qml.qnode(dev)
        def circuit():
            ops[operation].queue().inv()
            return qml.expval(qml.Identity(wires=0))

        assert isinstance(circuit(), (float, np.ndarray))

# pennylane/devices/tests/test_gates.py — test_basis_state (AlaricCheng/pennylane)
@pytest.mark.parametrize('basis_state', [np.array([0, 0, 1, 0]), np.array([0, 0, 1, 0]), np.array([1, 0, 1, 0]), np.array([1, 1, 1, 1])])
def test_basis_state(self, device, basis_state, tol, skip_if):
    """Test basis state initialization."""
    n_wires = 4
    dev = device(n_wires)
    skip_if(dev, {'returns_probs': False})

    @qml.qnode(dev)
    def circuit():
        qml.BasisState(basis_state, wires=range(n_wires))
        return qml.probs(wires=range(n_wires))

    res = circuit()
    expected = np.zeros([2 ** n_wires])
    expected[np.ravel_multi_index(basis_state, [2] * n_wires)] = 1
    assert np.allclose(res, expected, atol=tol(dev.shots))
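
`np.ravel_multi_index` converts the basis-state bit string into its position in the length-2**n probability vector, i.e. it reads the bits as a binary number:

```python
import numpy as np

# |1010> on 4 wires sits at index 0b1010 == 10 of the 16-entry probability vector.
assert np.ravel_multi_index(np.array([1, 0, 1, 0]), [2] * 4) == 10
```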

# pennylane/devices/tests/test_gates.py — test_qubit_state_vector (AlaricCheng/pennylane)
def test_qubit_state_vector(self, device, init_state, tol, skip_if):
    """Test QubitStateVector initialisation."""
    n_wires = 1
    dev = device(n_wires)
    skip_if(dev, {'returns_probs': False})
    rnd_state = init_state(n_wires)

    @qml.qnode(dev)
    def circuit():
        qml.QubitStateVector(rnd_state, wires=range(n_wires))
        return qml.probs(range(n_wires))

    res = circuit()
    expected = np.abs(rnd_state) ** 2
    assert np.allclose(res, expected, atol=tol(dev.shots))

# pennylane/devices/tests/test_gates.py — test_single_qubit_no_parameters (AlaricCheng/pennylane)
@pytest.mark.parametrize('op,mat', single_qubit)
def test_single_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):
    """Test PauliX application."""
    n_wires = 1
    dev = device(n_wires)
    skip_if(dev, {'returns_probs': False})
    rnd_state = init_state(n_wires)

    @qml.qnode(dev)
    def circuit():
        qml.QubitStateVector(rnd_state, wires=range(n_wires))
        op(wires=range(n_wires))
        return qml.probs(wires=range(n_wires))

    res = circuit()
    expected = np.abs(mat @ rnd_state) ** 2
    assert np.allclose(res, expected, atol=tol(dev.shots))
def test_single_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):
n_wires = 1
dev = device(n_wires)
skip_if(dev, {'returns_probs': False})
rnd_state = init_state(n_wires)
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(rnd_state, wires=range(n_wires))
op(wires=range(n_wires))
return qml.probs(wires=range(n_wires))
res = circuit()
expected = (np.abs((mat @ rnd_state)) ** 2)
assert np.allclose(res, expected, atol=tol(dev.shots)) |

# pennylane/devices/tests/test_gates.py — test_single_qubit_parameters (AlaricCheng/pennylane)
@pytest.mark.parametrize('gamma', [0.5432, -0.232])
@pytest.mark.parametrize('op,func', single_qubit_param)
def test_single_qubit_parameters(self, device, init_state, op, func, gamma, tol, skip_if):
    """Test single qubit gates taking a single scalar argument."""
    n_wires = 1
    dev = device(n_wires)
    skip_if(dev, {'returns_probs': False})
    rnd_state = init_state(n_wires)

    @qml.qnode(dev)
    def circuit():
        qml.QubitStateVector(rnd_state, wires=range(n_wires))
        op(gamma, wires=range(n_wires))
        return qml.probs(wires=range(n_wires))

    res = circuit()
    expected = np.abs(func(gamma) @ rnd_state) ** 2
    assert np.allclose(res, expected, atol=tol(dev.shots))
@pytest.mark.parametrize('op,func', single_qubit_param)
def test_single_qubit_parameters(self, device, init_state, op, func, gamma, tol, skip_if):
n_wires = 1
dev = device(n_wires)
skip_if(dev, {'returns_probs': False})
rnd_state = init_state(n_wires)
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(rnd_state, wires=range(n_wires))
op(gamma, wires=range(n_wires))
return qml.probs(wires=range(n_wires))
res = circuit()
expected = (np.abs((func(gamma) @ rnd_state)) ** 2)
assert np.allclose(res, expected, atol=tol(dev.shots)) |

# pennylane/devices/tests/test_gates.py — test_rotation (AlaricCheng/pennylane)
def test_rotation(self, device, init_state, tol, skip_if):
    """Test three axis rotation gate."""
    n_wires = 1
    dev = device(n_wires)
    skip_if(dev, {'returns_probs': False})
    rnd_state = init_state(n_wires)
    a = 0.542
    b = 1.3432
    c = -0.654

    @qml.qnode(dev)
    def circuit():
        qml.QubitStateVector(rnd_state, wires=range(n_wires))
        qml.Rot(a, b, c, wires=range(n_wires))
        return qml.probs(wires=range(n_wires))

    res = circuit()
    expected = np.abs(rot(a, b, c) @ rnd_state) ** 2
    assert np.allclose(res, expected, atol=tol(dev.shots))
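
The `rot` helper is defined elsewhere in the test module; a sketch consistent with PennyLane's convention that `qml.Rot(a, b, c)` decomposes as `RZ(c) · RY(b) · RZ(a)` (the helper names are assumed from context):

```python
import numpy as np

def rz(theta):
    # exp(-i * theta * Z / 2)
    return np.array([[np.exp(-1j * theta / 2), 0], [0, np.exp(1j * theta / 2)]])

def ry(theta):
    # exp(-i * theta * Y / 2)
    return np.array([[np.cos(theta / 2), -np.sin(theta / 2)],
                     [np.sin(theta / 2), np.cos(theta / 2)]])

def rot(a, b, c):
    # Matches qml.Rot(a, b, c) = RZ(c) @ RY(b) @ RZ(a)
    return rz(c) @ ry(b) @ rz(a)
```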

# pennylane/devices/tests/test_gates.py — test_two_qubit_no_parameters (AlaricCheng/pennylane)
@pytest.mark.parametrize('op,mat', two_qubit)
def test_two_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):
    """Test two qubit gates."""
    n_wires = 2
    dev = device(n_wires)
    skip_if(dev, {'returns_probs': False})
    rnd_state = init_state(n_wires)

    @qml.qnode(dev)
    def circuit():
        qml.QubitStateVector(rnd_state, wires=range(n_wires))
        op(wires=range(n_wires))
        return qml.probs(wires=range(n_wires))

    res = circuit()
    expected = np.abs(mat @ rnd_state) ** 2
    assert np.allclose(res, expected, atol=tol(dev.shots))
def test_two_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):
n_wires = 2
dev = device(n_wires)
skip_if(dev, {'returns_probs': False})
rnd_state = init_state(n_wires)
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(rnd_state, wires=range(n_wires))
op(wires=range(n_wires))
return qml.probs(wires=range(n_wires))
res = circuit()
expected = (np.abs((mat @ rnd_state)) ** 2)
assert np.allclose(res, expected, atol=tol(dev.shots)) |

# pennylane/devices/tests/test_gates.py — test_two_qubit_parameters (AlaricCheng/pennylane)
@pytest.mark.parametrize('param', [0.5432, -0.232])
@pytest.mark.parametrize('op,func', two_qubit_param)
def test_two_qubit_parameters(self, device, init_state, op, func, param, tol, skip_if):
    """Test parametrized two qubit gates taking a single scalar argument."""
    n_wires = 2
    dev = device(n_wires)
    skip_if(dev, {'returns_probs': False})
    rnd_state = init_state(n_wires)

    @qml.qnode(dev)
    def circuit():
        qml.QubitStateVector(rnd_state, wires=range(n_wires))
        op(param, wires=range(n_wires))
        return qml.probs(wires=range(n_wires))

    res = circuit()
    expected = np.abs(func(param) @ rnd_state) ** 2
    assert np.allclose(res, expected, atol=tol(dev.shots))
@pytest.mark.parametrize('op,func', two_qubit_param)
def test_two_qubit_parameters(self, device, init_state, op, func, param, tol, skip_if):
n_wires = 2
dev = device(n_wires)
skip_if(dev, {'returns_probs': False})
rnd_state = init_state(n_wires)
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(rnd_state, wires=range(n_wires))
op(param, wires=range(n_wires))
return qml.probs(wires=range(n_wires))
res = circuit()
expected = (np.abs((func(param) @ rnd_state)) ** 2)
assert np.allclose(res, expected, atol=tol(dev.shots)) |

# pennylane/devices/tests/test_gates.py — test_qubit_unitary (AlaricCheng/pennylane)
@pytest.mark.parametrize('mat', [U, U2])
def test_qubit_unitary(self, device, init_state, mat, tol, skip_if):
    """Test QubitUnitary gate."""
    n_wires = int(np.log2(len(mat)))
    dev = device(n_wires)
    if 'QubitUnitary' not in dev.operations:
        pytest.skip('Skipped because device does not support QubitUnitary.')
    skip_if(dev, {'returns_probs': False})
    rnd_state = init_state(n_wires)

    @qml.qnode(dev)
    def circuit():
        qml.QubitStateVector(rnd_state, wires=range(n_wires))
        qml.QubitUnitary(mat, wires=list(range(n_wires)))
        return qml.probs(wires=range(n_wires))

    res = circuit()
    expected = np.abs(mat @ rnd_state) ** 2
    assert np.allclose(res, expected, atol=tol(dev.shots))
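
`U` and `U2` are fixed unitaries defined elsewhere in the test module; any unitary of the right size would exercise the same code path. An illustrative (not the module's actual) 2×2 unitary:

```python
import numpy as np

U_example = np.array([[1, 1], [1, -1]]) / np.sqrt(2)  # Hadamard, a valid 2x2 unitary
assert np.allclose(U_example @ U_example.conj().T, np.eye(2))
```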

# pennylane/devices/tests/test_gates.py — test_three_qubit_no_parameters (AlaricCheng/pennylane)
@pytest.mark.parametrize('op, mat', three_qubit)
def test_three_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):
    """Test three qubit gates without parameters."""
    n_wires = 3
    dev = device(n_wires)
    skip_if(dev, {'returns_probs': False})
    rnd_state = init_state(n_wires)

    @qml.qnode(dev)
    def circuit():
        qml.QubitStateVector(rnd_state, wires=range(n_wires))
        op(wires=[0, 1, 2])
        return qml.probs(wires=range(n_wires))

    res = circuit()
    expected = np.abs(mat @ rnd_state) ** 2
    assert np.allclose(res, expected, atol=tol(dev.shots))

# pennylane/devices/tests/test_gates.py — test_single_qubit_no_parameters, inverse (AlaricCheng/pennylane)
@pytest.mark.parametrize('op,mat', single_qubit)
def test_single_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):
    """Test inverse single qubit gate application."""
    n_wires = 1
    dev = device(n_wires)
    skip_if(dev, {'supports_inverse_operations': False})
    skip_if(dev, {'returns_probs': False})
    rnd_state = init_state(1)

    @qml.qnode(dev)
    def circuit():
        qml.QubitStateVector(rnd_state, wires=range(n_wires))
        op(wires=range(n_wires)).inv()
        return qml.probs(wires=range(n_wires))

    res = circuit()
    mat = mat.conj().T  # the inverse of a unitary is its conjugate transpose
    expected = np.abs(mat @ rnd_state) ** 2
    assert np.allclose(res, expected, atol=tol(dev.shots))

# pennylane/devices/tests/test_gates.py — test_single_qubit_parameters, inverse (AlaricCheng/pennylane)
@pytest.mark.parametrize('gamma', [0.5432, -0.232])
@pytest.mark.parametrize('op,func', single_qubit_param)
def test_single_qubit_parameters(self, device, init_state, op, func, gamma, tol, skip_if):
    """Test inverse single qubit gates taking one scalar parameter."""
    n_wires = 1
    dev = device(n_wires)
    skip_if(dev, {'supports_inverse_operations': False})
    skip_if(dev, {'returns_probs': False})
    rnd_state = init_state(n_wires)

    @qml.qnode(dev)
    def circuit():
        qml.QubitStateVector(rnd_state, wires=range(n_wires))
        op(gamma, wires=range(n_wires)).inv()
        return qml.probs(wires=range(n_wires))

    res = circuit()
    mat = func(gamma)
    mat = mat.conj().T
    expected = np.abs(mat @ rnd_state) ** 2
    assert np.allclose(res, expected, atol=tol(dev.shots))
@pytest.mark.parametrize('op,func', single_qubit_param)
def test_single_qubit_parameters(self, device, init_state, op, func, gamma, tol, skip_if):
n_wires = 1
dev = device(n_wires)
skip_if(dev, {'supports_inverse_operations': False})
skip_if(dev, {'returns_probs': False})
rnd_state = init_state(n_wires)
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(rnd_state, wires=range(n_wires))
op(gamma, wires=range(n_wires)).inv()
return qml.probs(wires=range(n_wires))
res = circuit()
mat = func(gamma)
mat = mat.conj().T
expected = (np.abs((mat @ rnd_state)) ** 2)
assert np.allclose(res, expected, atol=tol(dev.shots)) |

# pennylane/devices/tests/test_gates.py — test_rotation, inverse (AlaricCheng/pennylane)
def test_rotation(self, device, init_state, tol, skip_if):
    """Test inverse three axis rotation gate."""
    n_wires = 1
    dev = device(n_wires)
    skip_if(dev, {'supports_inverse_operations': False})
    skip_if(dev, {'returns_probs': False})
    rnd_state = init_state(1)
    a = 0.542
    b = 1.3432
    c = -0.654

    @qml.qnode(dev)
    def circuit():
        qml.QubitStateVector(rnd_state, wires=range(n_wires))
        qml.Rot(a, b, c, wires=range(n_wires)).inv()
        return qml.probs(wires=range(n_wires))

    res = circuit()
    mat = rot(a, b, c)
    mat = mat.conj().T
    expected = np.abs(mat @ rnd_state) ** 2
    assert np.allclose(res, expected, atol=tol(dev.shots))

# pennylane/devices/tests/test_gates.py — test_two_qubit_no_parameters, inverse (AlaricCheng/pennylane)
@pytest.mark.parametrize('op,mat', two_qubit)
def test_two_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):
    """Test inverse two qubit gates."""
    n_wires = 2
    dev = device(n_wires)
    skip_if(dev, {'supports_inverse_operations': False})
    skip_if(dev, {'returns_probs': False})
    rnd_state = init_state(n_wires)

    @qml.qnode(dev)
    def circuit():
        qml.QubitStateVector(rnd_state, wires=range(n_wires))
        op(wires=range(n_wires)).inv()
        return qml.probs(wires=range(n_wires))

    res = circuit()
    mat = mat.conj().T
    expected = np.abs(mat @ rnd_state) ** 2
    assert np.allclose(res, expected, atol=tol(dev.shots))
def test_two_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):
n_wires = 2
dev = device(n_wires)
skip_if(dev, {'supports_inverse_operations': False})
skip_if(dev, {'returns_probs': False})
rnd_state = init_state(n_wires)
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(rnd_state, wires=range(n_wires))
op(wires=range(n_wires)).inv()
return qml.probs(wires=range(n_wires))
res = circuit()
mat = mat.conj().T
expected = (np.abs((mat @ rnd_state)) ** 2)
assert np.allclose(res, expected, atol=tol(dev.shots)) |

# pennylane/devices/tests/test_gates.py — test_two_qubit_parameters, inverse (AlaricCheng/pennylane)
@pytest.mark.parametrize('gamma', [0.5432, -0.232])
@pytest.mark.parametrize('op,func', two_qubit_param)
def test_two_qubit_parameters(self, device, init_state, op, func, gamma, tol, skip_if):
    """Test inverse of two qubit gates taking one parameter."""
    n_wires = 2
    dev = device(n_wires)
    skip_if(dev, {'supports_inverse_operations': False})
    skip_if(dev, {'returns_probs': False})
    rnd_state = init_state(2)

    @qml.qnode(dev)
    def circuit():
        qml.QubitStateVector(rnd_state, wires=range(n_wires))
        op(gamma, wires=range(n_wires)).inv()
        return qml.probs(wires=range(n_wires))

    res = circuit()
    mat = func(gamma)
    mat = mat.conj().T
    expected = np.abs(mat @ rnd_state) ** 2
    assert np.allclose(res, expected, atol=tol(dev.shots))
@pytest.mark.parametrize('op,func', two_qubit_param)
def test_two_qubit_parameters(self, device, init_state, op, func, gamma, tol, skip_if):
n_wires = 2
dev = device(n_wires)
skip_if(dev, {'supports_inverse_operations': False})
skip_if(dev, {'returns_probs': False})
rnd_state = init_state(2)
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(rnd_state, wires=range(n_wires))
op(gamma, wires=range(n_wires)).inv()
return qml.probs(wires=range(n_wires))
res = circuit()
mat = func(gamma)
mat = mat.conj().T
expected = (np.abs((mat @ rnd_state)) ** 2)
assert np.allclose(res, expected, atol=tol(dev.shots)) |

# pennylane/devices/tests/test_gates.py — test_qubit_unitary, inverse (AlaricCheng/pennylane)
@pytest.mark.parametrize('mat', [U, U2])
def test_qubit_unitary(self, device, init_state, mat, tol, skip_if):
    """Test inverse QubitUnitary gate."""
    n_wires = int(np.log2(len(mat)))
    dev = device(n_wires)
    skip_if(dev, {'supports_inverse_operations': False})
    skip_if(dev, {'returns_probs': False})
    rnd_state = init_state(n_wires)

    @qml.qnode(dev)
    def circuit():
        qml.QubitStateVector(rnd_state, wires=range(n_wires))
        qml.QubitUnitary(mat, wires=list(range(n_wires))).inv()
        return qml.probs(wires=range(n_wires))

    res = circuit()
    mat = mat.conj().T
    expected = np.abs(mat @ rnd_state) ** 2
    assert np.allclose(res, expected, atol=tol(dev.shots))

# pennylane/devices/tests/test_gates.py — test_three_qubit_no_parameters, inverse (AlaricCheng/pennylane)
@pytest.mark.parametrize('op, mat', three_qubit)
def test_three_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):
    """Test inverse three qubit gates without parameters."""
    n_wires = 3
    dev = device(n_wires)
    skip_if(dev, {'supports_inverse_operations': False})
    skip_if(dev, {'returns_probs': False})
    rnd_state = init_state(3)

    @qml.qnode(dev)
    def circuit():
        qml.QubitStateVector(rnd_state, wires=range(n_wires))
        op(wires=range(n_wires)).inv()
        return qml.probs(wires=range(n_wires))

    res = circuit()
    mat = mat.conj().T
    expected = np.abs(mat @ rnd_state) ** 2
    assert np.allclose(res, expected, atol=tol(dev.shots))

# src/nlpia/features.py — optimize_feature_power (AAAI-DISIM-UnivAQ/nlpia)
def optimize_feature_power(df, output_column_name=None, exponents=[2.0, 1.0, 0.8, 0.5, 0.25, 0.1, 0.01]):
    """Plot the correlation coefficient for various exponential scalings of input features

    >>> np.random.seed(314159)
    >>> df = pd.DataFrame()
    >>> df['output'] = np.random.randn(1000)
    >>> df['x10'] = df.output * 10
    >>> df['sq'] = df.output ** 2
    >>> df['sqrt'] = df.output ** .5
    >>> optimize_feature_power(df, output_column_name='output').round(2)
            x10    sq  sqrt
    power
    2.00  -0.08  1.00  0.83
    1.00   1.00 -0.08  0.97
    0.80   1.00  0.90  0.99
    0.50   0.97  0.83  1.00
    0.25   0.93  0.76  0.99
    0.10   0.89  0.71  0.97
    0.01   0.86  0.67  0.95

    Returns:
        DataFrame:
            columns are the input_columns from the source dataframe (df)
            rows are correlation with output for each attempted exponent used to scale the input features
    """
    output_column_name = list(df.columns)[-1] if output_column_name is None else output_column_name
    input_column_names = [colname for colname in df.columns if output_column_name != colname]
    results = np.zeros((len(exponents), len(input_column_names)))
    for (rownum, exponent) in enumerate(exponents):
        for (colnum, column_name) in enumerate(input_column_names):
            results[rownum, colnum] = (df[output_column_name] ** exponent).corr(df[column_name])
    results = pd.DataFrame(results, columns=input_column_names, index=pd.Series(exponents, name='power'))
    return results
>>> np.random.seed(314159)
>>> df = pd.DataFrame()
>>> df['output'] = np.random.randn(1000)
>>> df['x10'] = df.output * 10
>>> df['sq'] = df.output ** 2
>>> df['sqrt'] = df.output ** .5
>>> optimize_feature_power(df, output_column_name='output').round(2)
x10 sq sqrt
power
2.00 -0.08 1.00 0.83
1.00 1.00 -0.08 0.97
0.80 1.00 0.90 0.99
0.50 0.97 0.83 1.00
0.25 0.93 0.76 0.99
0.10 0.89 0.71 0.97
0.01 0.86 0.67 0.95
Returns:
DataFrame:
columns are the input_columns from the source dataframe (df)
rows are correlation with output for each attempted exponent used to scale the input features | src/nlpia/features.py | optimize_feature_power | AAAI-DISIM-UnivAQ/nlpia | python | def optimize_feature_power(df, output_column_name=None, exponents=[2.0, 1.0, 0.8, 0.5, 0.25, 0.1, 0.01]):
" Plot the correlation coefficient for various exponential scalings of input features\n\n >>> np.random.seed(314159)\n >>> df = pd.DataFrame()\n >>> df['output'] = np.random.randn(1000)\n >>> df['x10'] = df.output * 10\n >>> df['sq'] = df.output ** 2\n >>> df['sqrt'] = df.output ** .5\n >>> optimize_feature_power(df, output_column_name='output').round(2)\n x10 sq sqrt\n power\n 2.00 -0.08 1.00 0.83\n 1.00 1.00 -0.08 0.97\n 0.80 1.00 0.90 0.99\n 0.50 0.97 0.83 1.00\n 0.25 0.93 0.76 0.99\n 0.10 0.89 0.71 0.97\n 0.01 0.86 0.67 0.95\n\n Returns:\n DataFrame:\n columns are the input_columns from the source dataframe (df)\n rows are correlation with output for each attempted exponent used to scale the input features\n "
output_column_name = (list(df.columns)[(- 1)] if (output_column_name is None) else output_column_name)
input_column_names = [colname for colname in df.columns if (output_column_name != colname)]
results = np.zeros((len(exponents), len(input_column_names)))
for (rownum, exponent) in enumerate(exponents):
for (colnum, column_name) in enumerate(input_column_names):
results[(rownum, colnum)] = (df[output_column_name] ** exponent).corr(df[column_name])
results = pd.DataFrame(results, columns=input_column_names, index=pd.Series(exponents, name='power'))
return results |
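A short usage sketch for the record above (hypothetical data, not from the source): because the returned frame is indexed by power with one column per input feature, the exponent with the strongest absolute correlation for each feature can be read off with idxmax:

import numpy as np
import pandas as pd

np.random.seed(0)
df = pd.DataFrame({'output': np.random.rand(1000)})  # positive values, so fractional powers are safe
df['sq'] = df.output ** 2
df['sqrt'] = df.output ** 0.5

results = optimize_feature_power(df, output_column_name='output')
best_powers = results.abs().idxmax()  # Series mapping each feature to its best exponent
print(best_powers)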
def write_to_big_query(testcase, regression_range_start, regression_range_end):
'Write the regression range to BigQuery.'
big_query.write_range(table_id='regressions', testcase=testcase, range_name='regression', start=regression_range_start, end=regression_range_end) | 719,582,359,429,633,500 | Write the regression range to BigQuery. | src/python/bot/tasks/regression_task.py | write_to_big_query | backwardn/clusterfuzz | python | def write_to_big_query(testcase, regression_range_start, regression_range_end):
big_query.write_range(table_id='regressions', testcase=testcase, range_name='regression', start=regression_range_start, end=regression_range_end) |
def _save_current_regression_range_indices(testcase_id, regression_range_start, regression_range_end):
    'Save current regression range indices in case we die in the middle of the task.'
testcase = data_handler.get_testcase_by_id(testcase_id)
testcase.set_metadata('last_regression_min', regression_range_start, update_testcase=False)
testcase.set_metadata('last_regression_max', regression_range_end, update_testcase=False)
    testcase.put() | 737,990,780,417,797,600 | Save current regression range indices in case we die in the middle of the task. | src/python/bot/tasks/regression_task.py | _save_current_regression_range_indices | backwardn/clusterfuzz | python | def _save_current_regression_range_indices(testcase_id, regression_range_start, regression_range_end):
testcase = data_handler.get_testcase_by_id(testcase_id)
testcase.set_metadata('last_regression_min', regression_range_start, update_testcase=False)
testcase.set_metadata('last_regression_max', regression_range_end, update_testcase=False)
testcase.put() |
def save_regression_range(testcase_id, regression_range_start, regression_range_end):
    'Saves the regression range and creates blame and impact tasks if needed.'
testcase = data_handler.get_testcase_by_id(testcase_id)
testcase.regression = ('%d:%d' % (regression_range_start, regression_range_end))
data_handler.update_testcase_comment(testcase, data_types.TaskState.FINISHED, ('regressed in range %s' % testcase.regression))
write_to_big_query(testcase, regression_range_start, regression_range_end)
task_creation.create_impact_task_if_needed(testcase)
task_creation.create_blame_task_if_needed(testcase)
    task_creation.request_bisection(testcase, 'regressed') | 5,124,561,393,051,868,000 | Saves the regression range and creates blame and impact tasks if needed. | src/python/bot/tasks/regression_task.py | save_regression_range | backwardn/clusterfuzz | python | def save_regression_range(testcase_id, regression_range_start, regression_range_end):
testcase = data_handler.get_testcase_by_id(testcase_id)
testcase.regression = ('%d:%d' % (regression_range_start, regression_range_end))
data_handler.update_testcase_comment(testcase, data_types.TaskState.FINISHED, ('regressed in range %s' % testcase.regression))
write_to_big_query(testcase, regression_range_start, regression_range_end)
task_creation.create_impact_task_if_needed(testcase)
task_creation.create_blame_task_if_needed(testcase)
task_creation.request_bisection(testcase, 'regressed') |
def _testcase_reproduces_in_revision(testcase, testcase_file_path, job_type, revision, should_log=True, min_revision=None, max_revision=None):
'Test to see if a test case reproduces in the specified revision.'
if should_log:
log_message = ('Testing r%d' % revision)
if ((min_revision is not None) and (max_revision is not None)):
log_message += (' (current range %d:%d)' % (min_revision, max_revision))
testcase = data_handler.get_testcase_by_id(testcase.key.id())
data_handler.update_testcase_comment(testcase, data_types.TaskState.WIP, log_message)
build_manager.setup_build(revision)
if (not build_manager.check_app_path()):
raise errors.BuildSetupError(revision, job_type)
if testcase_manager.check_for_bad_build(job_type, revision):
log_message = ('Bad build at r%d. Skipping' % revision)
testcase = data_handler.get_testcase_by_id(testcase.key.id())
data_handler.update_testcase_comment(testcase, data_types.TaskState.WIP, log_message)
raise errors.BadBuildError(revision, job_type)
test_timeout = environment.get_value('TEST_TIMEOUT', 10)
result = testcase_manager.test_for_crash_with_retries(testcase, testcase_file_path, test_timeout, http_flag=testcase.http_flag)
return result.is_crash() | 5,971,759,863,926,169,000 | Test to see if a test case reproduces in the specified revision. | src/python/bot/tasks/regression_task.py | _testcase_reproduces_in_revision | backwardn/clusterfuzz | python | def _testcase_reproduces_in_revision(testcase, testcase_file_path, job_type, revision, should_log=True, min_revision=None, max_revision=None):
if should_log:
log_message = ('Testing r%d' % revision)
if ((min_revision is not None) and (max_revision is not None)):
log_message += (' (current range %d:%d)' % (min_revision, max_revision))
testcase = data_handler.get_testcase_by_id(testcase.key.id())
data_handler.update_testcase_comment(testcase, data_types.TaskState.WIP, log_message)
build_manager.setup_build(revision)
if (not build_manager.check_app_path()):
raise errors.BuildSetupError(revision, job_type)
if testcase_manager.check_for_bad_build(job_type, revision):
log_message = ('Bad build at r%d. Skipping' % revision)
testcase = data_handler.get_testcase_by_id(testcase.key.id())
data_handler.update_testcase_comment(testcase, data_types.TaskState.WIP, log_message)
raise errors.BadBuildError(revision, job_type)
test_timeout = environment.get_value('TEST_TIMEOUT', 10)
result = testcase_manager.test_for_crash_with_retries(testcase, testcase_file_path, test_timeout, http_flag=testcase.http_flag)
return result.is_crash() |
def found_regression_near_extreme_revisions(testcase, testcase_file_path, job_type, revision_list, min_index, max_index):
'Test to see if we regressed near either the min or max revision.'
last_known_crashing_revision = revision_list[max_index]
for offset in range(1, (EXTREME_REVISIONS_TO_TEST + 1)):
current_index = (max_index - offset)
if (current_index < min_index):
break
try:
is_crash = _testcase_reproduces_in_revision(testcase, testcase_file_path, job_type, revision_list[current_index])
except errors.BadBuildError:
continue
if (not is_crash):
save_regression_range(testcase.key.id(), revision_list[current_index], last_known_crashing_revision)
return True
last_known_crashing_revision = revision_list[current_index]
for _ in range(EXTREME_REVISIONS_TO_TEST):
min_revision = revision_list[min_index]
try:
crashes_in_min_revision = _testcase_reproduces_in_revision(testcase, testcase_file_path, job_type, min_revision, should_log=False)
except errors.BadBuildError:
if ((min_index + 1) >= max_index):
break
min_index += 1
continue
if crashes_in_min_revision:
save_regression_range(testcase.key.id(), 0, min_revision)
return True
return False
raise errors.BadBuildError(revision_list[min_index], job_type) | 4,292,211,542,325,893,000 | Test to see if we regressed near either the min or max revision. | src/python/bot/tasks/regression_task.py | found_regression_near_extreme_revisions | backwardn/clusterfuzz | python | def found_regression_near_extreme_revisions(testcase, testcase_file_path, job_type, revision_list, min_index, max_index):
last_known_crashing_revision = revision_list[max_index]
for offset in range(1, (EXTREME_REVISIONS_TO_TEST + 1)):
current_index = (max_index - offset)
if (current_index < min_index):
break
try:
is_crash = _testcase_reproduces_in_revision(testcase, testcase_file_path, job_type, revision_list[current_index])
except errors.BadBuildError:
continue
if (not is_crash):
save_regression_range(testcase.key.id(), revision_list[current_index], last_known_crashing_revision)
return True
last_known_crashing_revision = revision_list[current_index]
for _ in range(EXTREME_REVISIONS_TO_TEST):
min_revision = revision_list[min_index]
try:
crashes_in_min_revision = _testcase_reproduces_in_revision(testcase, testcase_file_path, job_type, min_revision, should_log=False)
except errors.BadBuildError:
if ((min_index + 1) >= max_index):
break
min_index += 1
continue
if crashes_in_min_revision:
save_regression_range(testcase.key.id(), 0, min_revision)
return True
return False
raise errors.BadBuildError(revision_list[min_index], job_type) |
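Before falling back to bisection, the function above probes a handful of revisions at each extreme of the range. A standalone sketch of the max-end walk, with a hypothetical crashes(revision) predicate standing in for _testcase_reproduces_in_revision:

EXTREME_REVISIONS_TO_TEST = 3  # assumed stand-in for the module-level constant

def probe_near_max(revision_list, min_index, max_index, crashes):
    """Walk backwards from the newest crashing revision; the first revision
    that does not crash bounds the regression range from below."""
    last_crashing = revision_list[max_index]
    for offset in range(1, EXTREME_REVISIONS_TO_TEST + 1):
        current = max_index - offset
        if current < min_index:
            break
        if not crashes(revision_list[current]):
            return revision_list[current], last_crashing  # (good, bad) pair
        last_crashing = revision_list[current]
    return None  # regression is not near the max end; bisect instead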
def validate_regression_range(testcase, testcase_file_path, job_type, revision_list, min_index):
'Ensure that we found the correct min revision by testing earlier ones.'
earlier_revisions = revision_list[(min_index - EARLIER_REVISIONS_TO_CONSIDER_FOR_VALIDATION):min_index]
revision_count = min(len(earlier_revisions), REVISIONS_TO_TEST_FOR_VALIDATION)
revisions_to_test = random.sample(earlier_revisions, revision_count)
for revision in revisions_to_test:
try:
if _testcase_reproduces_in_revision(testcase, testcase_file_path, job_type, revision):
testcase = data_handler.get_testcase_by_id(testcase.key.id())
testcase.regression = 'NA'
error_message = ('Low confidence in regression range. Test case crashes in revision r%d but not later revision r%d' % (revision, revision_list[min_index]))
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR, error_message)
return False
except errors.BadBuildError:
pass
return True | -5,095,946,911,319,247,000 | Ensure that we found the correct min revision by testing earlier ones. | src/python/bot/tasks/regression_task.py | validate_regression_range | backwardn/clusterfuzz | python | def validate_regression_range(testcase, testcase_file_path, job_type, revision_list, min_index):
earlier_revisions = revision_list[(min_index - EARLIER_REVISIONS_TO_CONSIDER_FOR_VALIDATION):min_index]
revision_count = min(len(earlier_revisions), REVISIONS_TO_TEST_FOR_VALIDATION)
revisions_to_test = random.sample(earlier_revisions, revision_count)
for revision in revisions_to_test:
try:
if _testcase_reproduces_in_revision(testcase, testcase_file_path, job_type, revision):
testcase = data_handler.get_testcase_by_id(testcase.key.id())
testcase.regression = 'NA'
error_message = ('Low confidence in regression range. Test case crashes in revision r%d but not later revision r%d' % (revision, revision_list[min_index]))
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR, error_message)
return False
except errors.BadBuildError:
pass
return True |
def find_regression_range(testcase_id, job_type):
'Attempt to find when the testcase regressed.'
deadline = tasks.get_task_completion_deadline()
testcase = data_handler.get_testcase_by_id(testcase_id)
if (not testcase):
return
if testcase.regression:
logs.log_error(('Regression range is already set as %s, skip.' % testcase.regression))
return
if build_manager.is_custom_binary():
testcase.regression = 'NA'
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR, 'Not applicable for custom binaries')
return
data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)
(file_list, _, testcase_file_path) = setup.setup_testcase(testcase, job_type)
if (not file_list):
testcase = data_handler.get_testcase_by_id(testcase_id)
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR, 'Failed to setup testcase')
tasks.add_task('regression', testcase_id, job_type)
return
build_bucket_path = build_manager.get_primary_bucket_path()
revision_list = build_manager.get_revisions_list(build_bucket_path, testcase=testcase)
if (not revision_list):
testcase = data_handler.get_testcase_by_id(testcase_id)
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR, 'Failed to fetch revision list')
tasks.add_task('regression', testcase_id, job_type)
return
environment.set_value('CACHE_STORE', False)
min_revision = testcase.get_metadata('last_regression_min')
max_revision = testcase.get_metadata('last_regression_max')
first_run = ((not min_revision) and (not max_revision))
if (not min_revision):
min_revision = revisions.get_first_revision_in_list(revision_list)
if (not max_revision):
max_revision = testcase.crash_revision
min_index = revisions.find_min_revision_index(revision_list, min_revision)
if (min_index is None):
raise errors.BuildNotFoundError(min_revision, job_type)
max_index = revisions.find_max_revision_index(revision_list, max_revision)
if (max_index is None):
raise errors.BuildNotFoundError(max_revision, job_type)
max_revision = revision_list[max_index]
crashes_in_max_revision = _testcase_reproduces_in_revision(testcase, testcase_file_path, job_type, max_revision, should_log=False)
if (not crashes_in_max_revision):
testcase = data_handler.get_testcase_by_id(testcase_id)
error_message = ('Known crash revision %d did not crash' % max_revision)
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR, error_message)
task_creation.mark_unreproducible_if_flaky(testcase, True)
return
task_creation.mark_unreproducible_if_flaky(testcase, False)
if (first_run and found_regression_near_extreme_revisions(testcase, testcase_file_path, job_type, revision_list, min_index, max_index)):
return
while (time.time() < deadline):
min_revision = revision_list[min_index]
max_revision = revision_list[max_index]
if ((max_index - min_index) <= 1):
if (not validate_regression_range(testcase, testcase_file_path, job_type, revision_list, min_index)):
return
save_regression_range(testcase_id, min_revision, max_revision)
return
middle_index = ((min_index + max_index) // 2)
middle_revision = revision_list[middle_index]
try:
is_crash = _testcase_reproduces_in_revision(testcase, testcase_file_path, job_type, middle_revision, min_revision=min_revision, max_revision=max_revision)
except errors.BadBuildError:
del revision_list[middle_index]
max_index -= 1
continue
if is_crash:
max_index = middle_index
else:
min_index = middle_index
_save_current_regression_range_indices(testcase_id, revision_list[min_index], revision_list[max_index])
testcase = data_handler.get_testcase_by_id(testcase_id)
error_message = ('Timed out, current range r%d:r%d' % (revision_list[min_index], revision_list[max_index]))
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR, error_message)
tasks.add_task('regression', testcase_id, job_type) | -6,315,374,529,848,634,000 | Attempt to find when the testcase regressed. | src/python/bot/tasks/regression_task.py | find_regression_range | backwardn/clusterfuzz | python | def find_regression_range(testcase_id, job_type):
deadline = tasks.get_task_completion_deadline()
testcase = data_handler.get_testcase_by_id(testcase_id)
if (not testcase):
return
if testcase.regression:
logs.log_error(('Regression range is already set as %s, skip.' % testcase.regression))
return
if build_manager.is_custom_binary():
testcase.regression = 'NA'
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR, 'Not applicable for custom binaries')
return
data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)
(file_list, _, testcase_file_path) = setup.setup_testcase(testcase, job_type)
if (not file_list):
testcase = data_handler.get_testcase_by_id(testcase_id)
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR, 'Failed to setup testcase')
tasks.add_task('regression', testcase_id, job_type)
return
build_bucket_path = build_manager.get_primary_bucket_path()
revision_list = build_manager.get_revisions_list(build_bucket_path, testcase=testcase)
if (not revision_list):
testcase = data_handler.get_testcase_by_id(testcase_id)
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR, 'Failed to fetch revision list')
tasks.add_task('regression', testcase_id, job_type)
return
environment.set_value('CACHE_STORE', False)
min_revision = testcase.get_metadata('last_regression_min')
max_revision = testcase.get_metadata('last_regression_max')
first_run = ((not min_revision) and (not max_revision))
if (not min_revision):
min_revision = revisions.get_first_revision_in_list(revision_list)
if (not max_revision):
max_revision = testcase.crash_revision
min_index = revisions.find_min_revision_index(revision_list, min_revision)
if (min_index is None):
raise errors.BuildNotFoundError(min_revision, job_type)
max_index = revisions.find_max_revision_index(revision_list, max_revision)
if (max_index is None):
raise errors.BuildNotFoundError(max_revision, job_type)
max_revision = revision_list[max_index]
crashes_in_max_revision = _testcase_reproduces_in_revision(testcase, testcase_file_path, job_type, max_revision, should_log=False)
if (not crashes_in_max_revision):
testcase = data_handler.get_testcase_by_id(testcase_id)
error_message = ('Known crash revision %d did not crash' % max_revision)
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR, error_message)
task_creation.mark_unreproducible_if_flaky(testcase, True)
return
task_creation.mark_unreproducible_if_flaky(testcase, False)
if (first_run and found_regression_near_extreme_revisions(testcase, testcase_file_path, job_type, revision_list, min_index, max_index)):
return
while (time.time() < deadline):
min_revision = revision_list[min_index]
max_revision = revision_list[max_index]
if ((max_index - min_index) <= 1):
if (not validate_regression_range(testcase, testcase_file_path, job_type, revision_list, min_index)):
return
save_regression_range(testcase_id, min_revision, max_revision)
return
middle_index = ((min_index + max_index) // 2)
middle_revision = revision_list[middle_index]
try:
is_crash = _testcase_reproduces_in_revision(testcase, testcase_file_path, job_type, middle_revision, min_revision=min_revision, max_revision=max_revision)
except errors.BadBuildError:
del revision_list[middle_index]
max_index -= 1
continue
if is_crash:
max_index = middle_index
else:
min_index = middle_index
_save_current_regression_range_indices(testcase_id, revision_list[min_index], revision_list[max_index])
testcase = data_handler.get_testcase_by_id(testcase_id)
error_message = ('Timed out, current range r%d:r%d' % (revision_list[min_index], revision_list[max_index]))
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR, error_message)
tasks.add_task('regression', testcase_id, job_type) |
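The while loop in find_regression_range is a bisection with one twist: a bad build is removed from the revision list rather than halving the range. A minimal self-contained sketch of that core, again with an assumed crashes(revision) predicate:

class BadBuildError(Exception):
    """Stand-in for errors.BadBuildError."""

def bisect_regression(revision_list, min_index, max_index, crashes):
    """Shrink [min_index, max_index] until the bounds are adjacent: a crash
    at the midpoint moves the upper bound down, no crash moves it up."""
    while max_index - min_index > 1:
        middle = (min_index + max_index) // 2
        try:
            is_crash = crashes(revision_list[middle])
        except BadBuildError:
            del revision_list[middle]  # unusable build: drop it and retry
            max_index -= 1
            continue
        if is_crash:
            max_index = middle
        else:
            min_index = middle
    return revision_list[min_index], revision_list[max_index]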
def execute_task(testcase_id, job_type):
'Run regression task and handle potential errors.'
try:
find_regression_range(testcase_id, job_type)
except errors.BuildSetupError as error:
testcase = data_handler.get_testcase_by_id(testcase_id)
error_message = ('Build setup failed r%d' % error.revision)
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR, error_message)
build_fail_wait = environment.get_value('FAIL_WAIT')
tasks.add_task('regression', testcase_id, job_type, wait_time=build_fail_wait)
except errors.BadBuildError:
testcase = data_handler.get_testcase_by_id(testcase_id)
testcase.regression = 'NA'
error_message = 'Unable to recover from bad build'
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR, error_message) | -4,425,301,150,540,572,000 | Run regression task and handle potential errors. | src/python/bot/tasks/regression_task.py | execute_task | backwardn/clusterfuzz | python | def execute_task(testcase_id, job_type):
try:
find_regression_range(testcase_id, job_type)
except errors.BuildSetupError as error:
testcase = data_handler.get_testcase_by_id(testcase_id)
error_message = ('Build setup failed r%d' % error.revision)
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR, error_message)
build_fail_wait = environment.get_value('FAIL_WAIT')
tasks.add_task('regression', testcase_id, job_type, wait_time=build_fail_wait)
except errors.BadBuildError:
testcase = data_handler.get_testcase_by_id(testcase_id)
testcase.regression = 'NA'
error_message = 'Unable to recover from bad build'
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR, error_message) |
def get_object_or_404(queryset, *filter_args, **filter_kwargs):
"\n Same as Django's standard shortcut, but make sure to also raise 404\n if the filter_kwargs don't match the required types.\n "
try:
return _get_object_or_404(queryset, *filter_args, **filter_kwargs)
except (TypeError, ValueError, ValidationError):
        raise APIException(message='The object does not exist or you do not have access to it')
if the filter_kwargs don't match the required types. | backend/utils/viewset.py | get_object_or_404 | lybbn/django-vue-lyadmin | python | def get_object_or_404(queryset, *filter_args, **filter_kwargs):
"\n Same as Django's standard shortcut, but make sure to also raise 404\n if the filter_kwargs don't match the required types.\n "
try:
return _get_object_or_404(queryset, *filter_args, **filter_kwargs)
except (TypeError, ValueError, ValidationError):
        raise APIException(message='The object does not exist or you do not have access to it')
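A hypothetical call site for the record above (the Article model and its import path are assumptions): a malformed primary key such as 'abc' raises ValueError inside Django's integer pk lookup, and this wrapper converts it into the API error rather than an unhandled 500:

from myapp.models import Article  # assumed model and import path

def retrieve_article(pk):
    # pk='abc' raises ValueError in the integer pk lookup; the wrapper
    # above converts that into APIException instead of a 500 response.
    return get_object_or_404(Article.objects.all(), pk=pk)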
def run_describe_cli_tests(experiment_id: int) -> None:
'\n Runs `det experiment describe` CLI command on a finished\n experiment. Will raise an exception if `det experiment describe`\n encounters a traceback failure.\n '
with tempfile.TemporaryDirectory() as tmpdir:
subprocess.check_call(['det', '-m', conf.make_master_url(), 'experiment', 'describe', str(experiment_id), '--outdir', tmpdir])
assert os.path.exists(os.path.join(tmpdir, 'experiments.csv'))
assert os.path.exists(os.path.join(tmpdir, 'workloads.csv'))
assert os.path.exists(os.path.join(tmpdir, 'trials.csv'))
with tempfile.TemporaryDirectory() as tmpdir:
subprocess.check_call(['det', '-m', conf.make_master_url(), 'experiment', 'describe', str(experiment_id), '--metrics', '--outdir', tmpdir])
assert os.path.exists(os.path.join(tmpdir, 'experiments.csv'))
assert os.path.exists(os.path.join(tmpdir, 'workloads.csv'))
assert os.path.exists(os.path.join(tmpdir, 'trials.csv')) | -8,418,697,501,923,943,000 | Runs `det experiment describe` CLI command on a finished
experiment. Will raise an exception if `det experiment describe`
encounters a traceback failure. | e2e_tests/tests/experiment/experiment.py | run_describe_cli_tests | liamcli/determined | python | def run_describe_cli_tests(experiment_id: int) -> None:
'\n Runs `det experiment describe` CLI command on a finished\n experiment. Will raise an exception if `det experiment describe`\n encounters a traceback failure.\n '
with tempfile.TemporaryDirectory() as tmpdir:
subprocess.check_call(['det', '-m', conf.make_master_url(), 'experiment', 'describe', str(experiment_id), '--outdir', tmpdir])
assert os.path.exists(os.path.join(tmpdir, 'experiments.csv'))
assert os.path.exists(os.path.join(tmpdir, 'workloads.csv'))
assert os.path.exists(os.path.join(tmpdir, 'trials.csv'))
with tempfile.TemporaryDirectory() as tmpdir:
subprocess.check_call(['det', '-m', conf.make_master_url(), 'experiment', 'describe', str(experiment_id), '--metrics', '--outdir', tmpdir])
assert os.path.exists(os.path.join(tmpdir, 'experiments.csv'))
assert os.path.exists(os.path.join(tmpdir, 'workloads.csv'))
assert os.path.exists(os.path.join(tmpdir, 'trials.csv')) |
def run_list_cli_tests(experiment_id: int) -> None:
'\n Runs list-related CLI commands on a finished experiment. Will raise an\n exception if the CLI command encounters a traceback failure.\n '
subprocess.check_call(['det', '-m', conf.make_master_url(), 'experiment', 'list-trials', str(experiment_id)])
subprocess.check_call(['det', '-m', conf.make_master_url(), 'experiment', 'list-checkpoints', str(experiment_id)])
subprocess.check_call(['det', '-m', conf.make_master_url(), 'experiment', 'list-checkpoints', '--best', str(1), str(experiment_id)]) | -1,878,743,959,272,950,800 | Runs list-related CLI commands on a finished experiment. Will raise an
exception if the CLI command encounters a traceback failure. | e2e_tests/tests/experiment/experiment.py | run_list_cli_tests | liamcli/determined | python | def run_list_cli_tests(experiment_id: int) -> None:
'\n Runs list-related CLI commands on a finished experiment. Will raise an\n exception if the CLI command encounters a traceback failure.\n '
subprocess.check_call(['det', '-m', conf.make_master_url(), 'experiment', 'list-trials', str(experiment_id)])
subprocess.check_call(['det', '-m', conf.make_master_url(), 'experiment', 'list-checkpoints', str(experiment_id)])
subprocess.check_call(['det', '-m', conf.make_master_url(), 'experiment', 'list-checkpoints', '--best', str(1), str(experiment_id)]) |
@property
def name(self):
'Name property'
ts = datetime.now().isoformat()
ts = ts.replace('-', '').replace(':', '').split('.')[0]
tab = (self.tab_name.replace(' ', '_').lower() if self.tab_name else 'notab')
tab = re.sub('\\W+', '', tab)
return f'sqllab_{tab}_{ts}' | 5,821,104,741,660,253,000 | Name property | superset/models/sql_lab.py | name | Zandut/Superset-Funnel | python | @property
def name(self):
ts = datetime.now().isoformat()
    ts = ts.replace('-', '').replace(':', '').split('.')[0]
tab = (self.tab_name.replace(' ', '_').lower() if self.tab_name else 'notab')
    tab = re.sub('\\W+', '', tab)
return f'sqllab_{tab}_{ts}' |
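To make the generated name format concrete, a standalone sketch of the same logic with a sample input (the printed timestamp is illustrative):

import re
from datetime import datetime

def sqllab_name(tab_name=None):
    ts = datetime.now().isoformat()
    ts = ts.replace('-', '').replace(':', '').split('.')[0]  # e.g. '20240101T120000'
    tab = tab_name.replace(' ', '_').lower() if tab_name else 'notab'
    tab = re.sub(r'\W+', '', tab)  # keep only word characters
    return f'sqllab_{tab}_{ts}'

print(sqllab_name('My Tab'))  # e.g. 'sqllab_my_tab_20240101T120000'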
def __init__(self, app=None):
'Extension initialization.'
if app:
self.init_app(app) | -3,186,472,059,789,530,600 | Extension initialization. | invenio_userprofiles/ext.py | __init__ | 0x2b3bfa0/invenio-userprofiles | python | def __init__(self, app=None):
if app:
self.init_app(app) |
def init_app(self, app):
'Flask application initialization.'
self.init_config(app)
app.context_processor((lambda : dict(current_userprofile=current_userprofile)))
app.extensions['invenio-userprofiles'] = self | 1,631,280,349,408,299,500 | Flask application initialization. | invenio_userprofiles/ext.py | init_app | 0x2b3bfa0/invenio-userprofiles | python | def init_app(self, app):
self.init_config(app)
app.context_processor((lambda : dict(current_userprofile=current_userprofile)))
app.extensions['invenio-userprofiles'] = self |
def init_config(self, app):
'Initialize configuration.'
excludes = ['USERPROFILES_BASE_TEMPLATE', 'USERPROFILES_SETTINGS_TEMPLATE']
for k in dir(config):
if (k.startswith('USERPROFILES_') and (k not in excludes)):
app.config.setdefault(k, getattr(config, k))
app.config.setdefault('USERPROFILES', True)
app.config.setdefault('USERPROFILES_BASE_TEMPLATE', app.config.get('BASE_TEMPLATE', 'invenio_userprofiles/base.html'))
app.config.setdefault('USERPROFILES_SETTINGS_TEMPLATE', app.config.get('SETTINGS_TEMPLATE', 'invenio_userprofiles/settings/base.html'))
if app.config['USERPROFILES_EXTEND_SECURITY_FORMS']:
app.config.setdefault('USERPROFILES_REGISTER_USER_BASE_TEMPLATE', app.config.get('SECURITY_REGISTER_USER_TEMPLATE', 'invenio_accounts/register_user.html'))
app.config['SECURITY_REGISTER_USER_TEMPLATE'] = 'invenio_userprofiles/register_user.html' | 8,313,840,024,581,992,000 | Initialize configuration. | invenio_userprofiles/ext.py | init_config | 0x2b3bfa0/invenio-userprofiles | python | def init_config(self, app):
excludes = ['USERPROFILES_BASE_TEMPLATE', 'USERPROFILES_SETTINGS_TEMPLATE']
for k in dir(config):
if (k.startswith('USERPROFILES_') and (k not in excludes)):
app.config.setdefault(k, getattr(config, k))
app.config.setdefault('USERPROFILES', True)
app.config.setdefault('USERPROFILES_BASE_TEMPLATE', app.config.get('BASE_TEMPLATE', 'invenio_userprofiles/base.html'))
app.config.setdefault('USERPROFILES_SETTINGS_TEMPLATE', app.config.get('SETTINGS_TEMPLATE', 'invenio_userprofiles/settings/base.html'))
if app.config['USERPROFILES_EXTEND_SECURITY_FORMS']:
app.config.setdefault('USERPROFILES_REGISTER_USER_BASE_TEMPLATE', app.config.get('SECURITY_REGISTER_USER_TEMPLATE', 'invenio_accounts/register_user.html'))
app.config['SECURITY_REGISTER_USER_TEMPLATE'] = 'invenio_userprofiles/register_user.html' |
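The three methods above follow the standard Flask extension pattern: accept an optional app in the constructor, defer real wiring to init_app, and use setdefault so user-provided configuration wins over module defaults. A generic sketch of the pattern, independent of Invenio:

from flask import Flask

class SimpleExtension:
    def __init__(self, app=None):
        if app:
            self.init_app(app)

    def init_app(self, app):
        # setdefault: only fill in values the user has not set already.
        app.config.setdefault('SIMPLE_GREETING', 'hello')
        app.extensions['simple'] = self

app = Flask(__name__)
SimpleExtension(app)
assert app.config['SIMPLE_GREETING'] == 'hello'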
def __init__(__self__, *, active_directory_id: Optional[pulumi.Input[str]]=None, dns: Optional[pulumi.Input[str]]=None, domain: Optional[pulumi.Input[str]]=None, organizational_unit: Optional[pulumi.Input[str]]=None, password: Optional[pulumi.Input[str]]=None, smb_server_name: Optional[pulumi.Input[str]]=None, status: Optional[pulumi.Input[str]]=None, username: Optional[pulumi.Input[str]]=None):
'\n Active Directory\n :param pulumi.Input[str] active_directory_id: Id of the Active Directory\n :param pulumi.Input[str] dns: Comma separated list of DNS server IP addresses for the Active Directory domain\n :param pulumi.Input[str] domain: Name of the Active Directory domain\n :param pulumi.Input[str] organizational_unit: The Organizational Unit (OU) within the Windows Active Directory\n :param pulumi.Input[str] password: Plain text password of Active Directory domain administrator\n :param pulumi.Input[str] smb_server_name: NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes\n :param pulumi.Input[str] status: Status of the Active Directory\n :param pulumi.Input[str] username: Username of Active Directory domain administrator\n '
if (active_directory_id is not None):
pulumi.set(__self__, 'active_directory_id', active_directory_id)
if (dns is not None):
pulumi.set(__self__, 'dns', dns)
if (domain is not None):
pulumi.set(__self__, 'domain', domain)
if (organizational_unit is not None):
pulumi.set(__self__, 'organizational_unit', organizational_unit)
if (password is not None):
pulumi.set(__self__, 'password', password)
if (smb_server_name is not None):
pulumi.set(__self__, 'smb_server_name', smb_server_name)
if (status is not None):
pulumi.set(__self__, 'status', status)
if (username is not None):
pulumi.set(__self__, 'username', username) | 9,085,980,231,334,252,000 | Active Directory
:param pulumi.Input[str] active_directory_id: Id of the Active Directory
:param pulumi.Input[str] dns: Comma separated list of DNS server IP addresses for the Active Directory domain
:param pulumi.Input[str] domain: Name of the Active Directory domain
:param pulumi.Input[str] organizational_unit: The Organizational Unit (OU) within the Windows Active Directory
:param pulumi.Input[str] password: Plain text password of Active Directory domain administrator
:param pulumi.Input[str] smb_server_name: NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes
:param pulumi.Input[str] status: Status of the Active Directory
:param pulumi.Input[str] username: Username of Active Directory domain administrator | sdk/python/pulumi_azure_native/netapp/v20190601/_inputs.py | __init__ | polivbr/pulumi-azure-native | python | def __init__(__self__, *, active_directory_id: Optional[pulumi.Input[str]]=None, dns: Optional[pulumi.Input[str]]=None, domain: Optional[pulumi.Input[str]]=None, organizational_unit: Optional[pulumi.Input[str]]=None, password: Optional[pulumi.Input[str]]=None, smb_server_name: Optional[pulumi.Input[str]]=None, status: Optional[pulumi.Input[str]]=None, username: Optional[pulumi.Input[str]]=None):
'\n Active Directory\n :param pulumi.Input[str] active_directory_id: Id of the Active Directory\n :param pulumi.Input[str] dns: Comma separated list of DNS server IP addresses for the Active Directory domain\n :param pulumi.Input[str] domain: Name of the Active Directory domain\n :param pulumi.Input[str] organizational_unit: The Organizational Unit (OU) within the Windows Active Directory\n :param pulumi.Input[str] password: Plain text password of Active Directory domain administrator\n :param pulumi.Input[str] smb_server_name: NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes\n :param pulumi.Input[str] status: Status of the Active Directory\n :param pulumi.Input[str] username: Username of Active Directory domain administrator\n '
if (active_directory_id is not None):
pulumi.set(__self__, 'active_directory_id', active_directory_id)
if (dns is not None):
pulumi.set(__self__, 'dns', dns)
if (domain is not None):
pulumi.set(__self__, 'domain', domain)
if (organizational_unit is not None):
pulumi.set(__self__, 'organizational_unit', organizational_unit)
if (password is not None):
pulumi.set(__self__, 'password', password)
if (smb_server_name is not None):
pulumi.set(__self__, 'smb_server_name', smb_server_name)
if (status is not None):
pulumi.set(__self__, 'status', status)
if (username is not None):
pulumi.set(__self__, 'username', username) |
@property
@pulumi.getter(name='activeDirectoryId')
def active_directory_id(self) -> Optional[pulumi.Input[str]]:
'\n Id of the Active Directory\n '
return pulumi.get(self, 'active_directory_id') | 5,840,936,279,723,831,000 | Id of the Active Directory | sdk/python/pulumi_azure_native/netapp/v20190601/_inputs.py | active_directory_id | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='activeDirectoryId')
def active_directory_id(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'active_directory_id') |
@property
@pulumi.getter
def dns(self) -> Optional[pulumi.Input[str]]:
'\n Comma separated list of DNS server IP addresses for the Active Directory domain\n '
return pulumi.get(self, 'dns') | -3,212,884,791,679,665,000 | Comma separated list of DNS server IP addresses for the Active Directory domain | sdk/python/pulumi_azure_native/netapp/v20190601/_inputs.py | dns | polivbr/pulumi-azure-native | python | @property
@pulumi.getter
def dns(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'dns') |
@property
@pulumi.getter
def domain(self) -> Optional[pulumi.Input[str]]:
'\n Name of the Active Directory domain\n '
return pulumi.get(self, 'domain') | -4,126,563,998,092,930,600 | Name of the Active Directory domain | sdk/python/pulumi_azure_native/netapp/v20190601/_inputs.py | domain | polivbr/pulumi-azure-native | python | @property
@pulumi.getter
def domain(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'domain') |
@property
@pulumi.getter(name='organizationalUnit')
def organizational_unit(self) -> Optional[pulumi.Input[str]]:
'\n The Organizational Unit (OU) within the Windows Active Directory\n '
return pulumi.get(self, 'organizational_unit') | -3,698,790,052,425,906,000 | The Organizational Unit (OU) within the Windows Active Directory | sdk/python/pulumi_azure_native/netapp/v20190601/_inputs.py | organizational_unit | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='organizationalUnit')
def organizational_unit(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'organizational_unit') |
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
'\n Plain text password of Active Directory domain administrator\n '
return pulumi.get(self, 'password') | -8,485,341,980,645,831,000 | Plain text password of Active Directory domain administrator | sdk/python/pulumi_azure_native/netapp/v20190601/_inputs.py | password | polivbr/pulumi-azure-native | python | @property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'password') |
@property
@pulumi.getter(name='smbServerName')
def smb_server_name(self) -> Optional[pulumi.Input[str]]:
'\n NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes\n '
return pulumi.get(self, 'smb_server_name') | 6,771,407,966,225,489,000 | NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes | sdk/python/pulumi_azure_native/netapp/v20190601/_inputs.py | smb_server_name | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='smbServerName')
def smb_server_name(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'smb_server_name') |
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
'\n Status of the Active Directory\n '
return pulumi.get(self, 'status') | 6,788,668,124,996,516,000 | Status of the Active Directory | sdk/python/pulumi_azure_native/netapp/v20190601/_inputs.py | status | polivbr/pulumi-azure-native | python | @property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'status') |
@property
@pulumi.getter
def username(self) -> Optional[pulumi.Input[str]]:
'\n Username of Active Directory domain administrator\n '
return pulumi.get(self, 'username') | 2,335,645,519,393,690,000 | Username of Active Directory domain administrator | sdk/python/pulumi_azure_native/netapp/v20190601/_inputs.py | username | polivbr/pulumi-azure-native | python | @property
@pulumi.getter
def username(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'username') |
def __init__(__self__, *, allowed_clients: Optional[pulumi.Input[str]]=None, cifs: Optional[pulumi.Input[bool]]=None, nfsv3: Optional[pulumi.Input[bool]]=None, nfsv4: Optional[pulumi.Input[bool]]=None, rule_index: Optional[pulumi.Input[int]]=None, unix_read_only: Optional[pulumi.Input[bool]]=None, unix_read_write: Optional[pulumi.Input[bool]]=None):
'\n Volume Export Policy Rule\n :param pulumi.Input[str] allowed_clients: Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names\n :param pulumi.Input[bool] cifs: Allows CIFS protocol\n :param pulumi.Input[bool] nfsv3: Allows NFSv3 protocol\n :param pulumi.Input[bool] nfsv4: Deprecated: Will use the NFSv4.1 protocol, please use swagger version 2019-07-01 or later\n :param pulumi.Input[int] rule_index: Order index\n :param pulumi.Input[bool] unix_read_only: Read only access\n :param pulumi.Input[bool] unix_read_write: Read and write access\n '
if (allowed_clients is not None):
pulumi.set(__self__, 'allowed_clients', allowed_clients)
if (cifs is not None):
pulumi.set(__self__, 'cifs', cifs)
if (nfsv3 is not None):
pulumi.set(__self__, 'nfsv3', nfsv3)
if (nfsv4 is not None):
pulumi.set(__self__, 'nfsv4', nfsv4)
if (rule_index is not None):
pulumi.set(__self__, 'rule_index', rule_index)
if (unix_read_only is not None):
pulumi.set(__self__, 'unix_read_only', unix_read_only)
if (unix_read_write is not None):
pulumi.set(__self__, 'unix_read_write', unix_read_write) | -7,530,094,763,840,628,000 | Volume Export Policy Rule
:param pulumi.Input[str] allowed_clients: Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names
:param pulumi.Input[bool] cifs: Allows CIFS protocol
:param pulumi.Input[bool] nfsv3: Allows NFSv3 protocol
:param pulumi.Input[bool] nfsv4: Deprecated: Will use the NFSv4.1 protocol, please use swagger version 2019-07-01 or later
:param pulumi.Input[int] rule_index: Order index
:param pulumi.Input[bool] unix_read_only: Read only access
:param pulumi.Input[bool] unix_read_write: Read and write access | sdk/python/pulumi_azure_native/netapp/v20190601/_inputs.py | __init__ | polivbr/pulumi-azure-native | python | def __init__(__self__, *, allowed_clients: Optional[pulumi.Input[str]]=None, cifs: Optional[pulumi.Input[bool]]=None, nfsv3: Optional[pulumi.Input[bool]]=None, nfsv4: Optional[pulumi.Input[bool]]=None, rule_index: Optional[pulumi.Input[int]]=None, unix_read_only: Optional[pulumi.Input[bool]]=None, unix_read_write: Optional[pulumi.Input[bool]]=None):
'\n Volume Export Policy Rule\n :param pulumi.Input[str] allowed_clients: Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names\n :param pulumi.Input[bool] cifs: Allows CIFS protocol\n :param pulumi.Input[bool] nfsv3: Allows NFSv3 protocol\n :param pulumi.Input[bool] nfsv4: Deprecated: Will use the NFSv4.1 protocol, please use swagger version 2019-07-01 or later\n :param pulumi.Input[int] rule_index: Order index\n :param pulumi.Input[bool] unix_read_only: Read only access\n :param pulumi.Input[bool] unix_read_write: Read and write access\n '
if (allowed_clients is not None):
pulumi.set(__self__, 'allowed_clients', allowed_clients)
if (cifs is not None):
pulumi.set(__self__, 'cifs', cifs)
if (nfsv3 is not None):
pulumi.set(__self__, 'nfsv3', nfsv3)
if (nfsv4 is not None):
pulumi.set(__self__, 'nfsv4', nfsv4)
if (rule_index is not None):
pulumi.set(__self__, 'rule_index', rule_index)
if (unix_read_only is not None):
pulumi.set(__self__, 'unix_read_only', unix_read_only)
if (unix_read_write is not None):
pulumi.set(__self__, 'unix_read_write', unix_read_write) |
@property
@pulumi.getter(name='allowedClients')
def allowed_clients(self) -> Optional[pulumi.Input[str]]:
'\n Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names\n '
return pulumi.get(self, 'allowed_clients') | -7,872,364,967,917,837,000 | Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names | sdk/python/pulumi_azure_native/netapp/v20190601/_inputs.py | allowed_clients | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='allowedClients')
def allowed_clients(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'allowed_clients') |
@property
@pulumi.getter
def cifs(self) -> Optional[pulumi.Input[bool]]:
'\n Allows CIFS protocol\n '
return pulumi.get(self, 'cifs') | 486,662,817,713,799,500 | Allows CIFS protocol | sdk/python/pulumi_azure_native/netapp/v20190601/_inputs.py | cifs | polivbr/pulumi-azure-native | python | @property
@pulumi.getter
def cifs(self) -> Optional[pulumi.Input[bool]]:
'\n \n '
return pulumi.get(self, 'cifs') |
@property
@pulumi.getter
def nfsv3(self) -> Optional[pulumi.Input[bool]]:
'\n Allows NFSv3 protocol\n '
return pulumi.get(self, 'nfsv3') | 2,313,345,720,636,462,600 | Allows NFSv3 protocol | sdk/python/pulumi_azure_native/netapp/v20190601/_inputs.py | nfsv3 | polivbr/pulumi-azure-native | python | @property
@pulumi.getter
def nfsv3(self) -> Optional[pulumi.Input[bool]]:
'\n \n '
return pulumi.get(self, 'nfsv3') |
@property
@pulumi.getter
def nfsv4(self) -> Optional[pulumi.Input[bool]]:
'\n Deprecated: Will use the NFSv4.1 protocol, please use swagger version 2019-07-01 or later\n '
return pulumi.get(self, 'nfsv4') | -1,784,523,787,893,940,700 | Deprecated: Will use the NFSv4.1 protocol, please use swagger version 2019-07-01 or later | sdk/python/pulumi_azure_native/netapp/v20190601/_inputs.py | nfsv4 | polivbr/pulumi-azure-native | python | @property
@pulumi.getter
def nfsv4(self) -> Optional[pulumi.Input[bool]]:
'\n \n '
return pulumi.get(self, 'nfsv4') |
@property
@pulumi.getter(name='ruleIndex')
def rule_index(self) -> Optional[pulumi.Input[int]]:
'\n Order index\n '
return pulumi.get(self, 'rule_index') | -8,659,377,043,404,429,000 | Order index | sdk/python/pulumi_azure_native/netapp/v20190601/_inputs.py | rule_index | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='ruleIndex')
def rule_index(self) -> Optional[pulumi.Input[int]]:
'\n \n '
return pulumi.get(self, 'rule_index') |
@property
@pulumi.getter(name='unixReadOnly')
def unix_read_only(self) -> Optional[pulumi.Input[bool]]:
'\n Read only access\n '
return pulumi.get(self, 'unix_read_only') | -4,995,608,380,015,937,000 | Read only access | sdk/python/pulumi_azure_native/netapp/v20190601/_inputs.py | unix_read_only | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='unixReadOnly')
def unix_read_only(self) -> Optional[pulumi.Input[bool]]:
'\n \n '
return pulumi.get(self, 'unix_read_only') |
@property
@pulumi.getter(name='unixReadWrite')
def unix_read_write(self) -> Optional[pulumi.Input[bool]]:
'\n Read and write access\n '
return pulumi.get(self, 'unix_read_write') | -8,667,886,035,939,841,000 | Read and write access | sdk/python/pulumi_azure_native/netapp/v20190601/_inputs.py | unix_read_write | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='unixReadWrite')
def unix_read_write(self) -> Optional[pulumi.Input[bool]]:
'\n \n '
return pulumi.get(self, 'unix_read_write') |
def __init__(__self__, *, rules: Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]]=None):
"\n Set of export policy rules\n :param pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]] rules: Export policy rule\n "
if (rules is not None):
pulumi.set(__self__, 'rules', rules) | 5,309,897,111,849,674,000 | Set of export policy rules
:param pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]] rules: Export policy rule | sdk/python/pulumi_azure_native/netapp/v20190601/_inputs.py | __init__ | polivbr/pulumi-azure-native | python | def __init__(__self__, *, rules: Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]]=None):
"\n Set of export policy rules\n :param pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]] rules: Export policy rule\n "
if (rules is not None):
pulumi.set(__self__, 'rules', rules) |
@property
@pulumi.getter
def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]]:
'\n Export policy rule\n '
return pulumi.get(self, 'rules') | -1,851,467,702,409,360,000 | Export policy rule | sdk/python/pulumi_azure_native/netapp/v20190601/_inputs.py | rules | polivbr/pulumi-azure-native | python | @property
@pulumi.getter
def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]]:
'\n \n '
return pulumi.get(self, 'rules') |
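A hypothetical construction of these inputs as they might appear in a Pulumi program. ExportPolicyRuleArgs is named in the type annotation above; the name of the rules container class is an assumption here:

# Hypothetical: one NFSv3 read/write rule for a /24, wrapped in the
# rules container (container class name assumed).
rule = ExportPolicyRuleArgs(
    rule_index=1,
    allowed_clients='10.0.0.0/24',
    nfsv3=True,
    cifs=False,
    unix_read_only=False,
    unix_read_write=True,
)
policy = VolumePropertiesExportPolicyArgs(rules=[rule])  # assumed class name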
def __init__(self, config):
'\n :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`\n :param config: Configuration to be used for creating the stub.\n '
VapiInterface.__init__(self, config, _ConfigStub)
self._VAPI_OPERATION_IDS = {} | 6,982,411,610,322,444,000 | :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub. | com/vmware/vmc/orgs/sddcs/networks/edges/firewall_client.py | __init__ | adammillerio/vsphere-automation-sdk-python | python | def __init__(self, config):
'\n :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`\n :param config: Configuration to be used for creating the stub.\n '
VapiInterface.__init__(self, config, _ConfigStub)
self._VAPI_OPERATION_IDS = {} |
def delete(self, org, sddc, edge_id):
'\n Delete firewall configuration for a management or compute gateway (NSX\n Edge).\n\n :type org: :class:`str`\n :param org: Organization identifier. (required)\n :type sddc: :class:`str`\n :param sddc: Sddc Identifier. (required)\n :type edge_id: :class:`str`\n :param edge_id: Edge Identifier. (required)\n :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` \n Bad request. Request object passed is invalid.\n :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` \n Forbidden. Authorization header not provided.\n :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` \n Not found. Requested object not found.\n '
return self._invoke('delete', {'org': org, 'sddc': sddc, 'edge_id': edge_id}) | -3,575,079,468,359,822,300 | Delete firewall configuration for a management or compute gateway (NSX
Edge).
:type org: :class:`str`
:param org: Organization identifier. (required)
:type sddc: :class:`str`
:param sddc: Sddc Identifier. (required)
:type edge_id: :class:`str`
:param edge_id: Edge Identifier. (required)
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad request. Request object passed is invalid.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden. Authorization header not provided.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not found. Requested object not found. | com/vmware/vmc/orgs/sddcs/networks/edges/firewall_client.py | delete | adammillerio/vsphere-automation-sdk-python | python | def delete(self, org, sddc, edge_id):
'\n Delete firewall configuration for a management or compute gateway (NSX\n Edge).\n\n :type org: :class:`str`\n :param org: Organization identifier. (required)\n :type sddc: :class:`str`\n :param sddc: Sddc Identifier. (required)\n :type edge_id: :class:`str`\n :param edge_id: Edge Identifier. (required)\n :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` \n Bad request. Request object passed is invalid.\n :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` \n Forbidden. Authorization header not provided.\n :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` \n Not found. Requested object not found.\n '
return self._invoke('delete', {'org': org, 'sddc': sddc, 'edge_id': edge_id}) |
def get(self, org, sddc, edge_id):
'\n Retrieve the firewall configuration for a management or compute gateway\n (NSX Edge).\n\n :type org: :class:`str`\n :param org: Organization identifier. (required)\n :type sddc: :class:`str`\n :param sddc: Sddc Identifier. (required)\n :type edge_id: :class:`str`\n :param edge_id: Edge Identifier. (required)\n :rtype: :class:`com.vmware.vmc.model_client.FirewallConfig`\n :return: com.vmware.vmc.model.FirewallConfig\n :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` \n Bad request. Request object passed is invalid.\n :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` \n Forbidden. Authorization header not provided.\n :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` \n Not found. Requested object not found.\n '
return self._invoke('get', {'org': org, 'sddc': sddc, 'edge_id': edge_id}) | 6,371,970,016,597,263,000 | Retrieve the firewall configuration for a management or compute gateway
(NSX Edge).
:type org: :class:`str`
:param org: Organization identifier. (required)
:type sddc: :class:`str`
:param sddc: Sddc Identifier. (required)
:type edge_id: :class:`str`
:param edge_id: Edge Identifier. (required)
:rtype: :class:`com.vmware.vmc.model_client.FirewallConfig`
:return: com.vmware.vmc.model.FirewallConfig
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad request. Request object passed is invalid.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden. Authorization header not provided.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not found. Requested object not found. | com/vmware/vmc/orgs/sddcs/networks/edges/firewall_client.py | get | adammillerio/vsphere-automation-sdk-python | python | def get(self, org, sddc, edge_id):
'\n Retrieve the firewall configuration for a management or compute gateway\n (NSX Edge).\n\n :type org: :class:`str`\n :param org: Organization identifier. (required)\n :type sddc: :class:`str`\n :param sddc: Sddc Identifier. (required)\n :type edge_id: :class:`str`\n :param edge_id: Edge Identifier. (required)\n :rtype: :class:`com.vmware.vmc.model_client.FirewallConfig`\n :return: com.vmware.vmc.model.FirewallConfig\n :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` \n Bad request. Request object passed is invalid.\n :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` \n Forbidden. Authorization header not provided.\n :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` \n Not found. Requested object not found.\n '
return self._invoke('get', {'org': org, 'sddc': sddc, 'edge_id': edge_id}) |
def update(self, org, sddc, edge_id, firewall_config):
'\n Configure firewall for a management or compute gateway (NSX Edge).\n\n :type org: :class:`str`\n :param org: Organization identifier. (required)\n :type sddc: :class:`str`\n :param sddc: Sddc Identifier. (required)\n :type edge_id: :class:`str`\n :param edge_id: Edge Identifier. (required)\n :type firewall_config: :class:`com.vmware.vmc.model_client.FirewallConfig`\n :param firewall_config: (required)\n :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` \n Bad request. Request object passed is invalid.\n :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` \n Forbidden. Authorization header not provided.\n :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` \n Not found. Requested object not found.\n '
return self._invoke('update', {'org': org, 'sddc': sddc, 'edge_id': edge_id, 'firewall_config': firewall_config}) | -7,804,791,496,785,363,000 | Configure firewall for a management or compute gateway (NSX Edge).
:type org: :class:`str`
:param org: Organization identifier. (required)
:type sddc: :class:`str`
:param sddc: Sddc Identifier. (required)
:type edge_id: :class:`str`
:param edge_id: Edge Identifier. (required)
:type firewall_config: :class:`com.vmware.vmc.model_client.FirewallConfig`
:param firewall_config: (required)
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad request. Request object passed is invalid.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden. Authorization header not provided.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not found. Requested object not found. | com/vmware/vmc/orgs/sddcs/networks/edges/firewall_client.py | update | adammillerio/vsphere-automation-sdk-python | python | def update(self, org, sddc, edge_id, firewall_config):
'\n Configure firewall for a management or compute gateway (NSX Edge).\n\n :type org: :class:`str`\n :param org: Organization identifier. (required)\n :type sddc: :class:`str`\n :param sddc: Sddc Identifier. (required)\n :type edge_id: :class:`str`\n :param edge_id: Edge Identifier. (required)\n :type firewall_config: :class:`com.vmware.vmc.model_client.FirewallConfig`\n :param firewall_config: (required)\n :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` \n Bad request. Request object passed is invalid.\n :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` \n Forbidden. Authorization header not provided.\n :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` \n Not found. Requested object not found.\n '
return self._invoke('update', {'org': org, 'sddc': sddc, 'edge_id': edge_id, 'firewall_config': firewall_config}) |
def __init__(self, config):
'\n :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`\n :param config: Configuration to be used for creating the stub.\n '
VapiInterface.__init__(self, config, _StatisticsStub)
self._VAPI_OPERATION_IDS = {} | 1,127,859,318,954,161,400 | :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub. | com/vmware/vmc/orgs/sddcs/networks/edges/firewall_client.py | __init__ | adammillerio/vsphere-automation-sdk-python | python | def __init__(self, config):
'\n :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`\n :param config: Configuration to be used for creating the stub.\n '
VapiInterface.__init__(self, config, _StatisticsStub)
self._VAPI_OPERATION_IDS = {} |
def get(self, org, sddc, edge_id, rule_id):
'\n Retrieve statistics for a specific firewall rule for a management or\n compute gateway (NSX Edge).\n\n :type org: :class:`str`\n :param org: Organization identifier. (required)\n :type sddc: :class:`str`\n :param sddc: Sddc Identifier. (required)\n :type edge_id: :class:`str`\n :param edge_id: Edge Identifier. (required)\n :type rule_id: :class:`long`\n :param rule_id: Rule Identifier. (required)\n :rtype: :class:`com.vmware.vmc.model_client.FirewallRuleStats`\n :return: com.vmware.vmc.model.FirewallRuleStats\n :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` \n Bad request. Request object passed is invalid.\n :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` \n Forbidden. Authorization header not provided\n :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` \n Not found. Requested object not found.\n '
return self._invoke('get', {'org': org, 'sddc': sddc, 'edge_id': edge_id, 'rule_id': rule_id}) | 7,612,949,391,291,026,000 | Retrieve statistics for a specific firewall rule for a management or
compute gateway (NSX Edge).
:type org: :class:`str`
:param org: Organization identifier. (required)
:type sddc: :class:`str`
:param sddc: Sddc Identifier. (required)
:type edge_id: :class:`str`
:param edge_id: Edge Identifier. (required)
:type rule_id: :class:`long`
:param rule_id: Rule Identifier. (required)
:rtype: :class:`com.vmware.vmc.model_client.FirewallRuleStats`
:return: com.vmware.vmc.model.FirewallRuleStats
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad request. Request object passed is invalid.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden. Authorization header not provided.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not found. Requested object not found. | com/vmware/vmc/orgs/sddcs/networks/edges/firewall_client.py | get | adammillerio/vsphere-automation-sdk-python | python | def get(self, org, sddc, edge_id, rule_id):
'\n Retrieve statistics for a specific firewall rule for a management or\n compute gateway (NSX Edge).\n\n :type org: :class:`str`\n :param org: Organization identifier. (required)\n :type sddc: :class:`str`\n :param sddc: Sddc Identifier. (required)\n :type edge_id: :class:`str`\n :param edge_id: Edge Identifier. (required)\n :type rule_id: :class:`long`\n :param rule_id: Rule Identifier. (required)\n :rtype: :class:`com.vmware.vmc.model_client.FirewallRuleStats`\n :return: com.vmware.vmc.model.FirewallRuleStats\n :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` \n Bad request. Request object passed is invalid.\n :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` \n Forbidden. Authorization header not provided\n :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` \n Not found. Requested object not found.\n '
return self._invoke('get', {'org': org, 'sddc': sddc, 'edge_id': edge_id, 'rule_id': rule_id}) |
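A hedged sketch of calling the statistics operation above. The stub construction is assumed; the keyword arguments and the FirewallRuleStats return type come from the record.

def print_rule_stats(statistics_stub, org_id, sddc_id, edge_id, rule_id):
    # Returns a com.vmware.vmc.model_client.FirewallRuleStats instance,
    # per the :rtype: documented above.
    stats = statistics_stub.get(
        org=org_id, sddc=sddc_id, edge_id=edge_id, rule_id=rule_id)
    print(stats)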
def __init__(self, subtitled_html, recorded_voiceovers, written_translations):
'Constructs a SubtopicPageContents domain object.\n\n Args:\n subtitled_html: SubtitledHtml. The html data being displayed on\n the page.\n recorded_voiceovers: RecordedVoiceovers. The recorded voiceovers for\n the subtopic page content and their translations in different\n languages.\n written_translations: WrittenTranslations. The text translations of\n the subtopic page content.\n '
self.subtitled_html = subtitled_html
self.recorded_voiceovers = recorded_voiceovers
self.written_translations = written_translations | 3,073,627,641,787,389,000 | Constructs a SubtopicPageContents domain object.
Args:
subtitled_html: SubtitledHtml. The html data being displayed on
the page.
recorded_voiceovers: RecordedVoiceovers. The recorded voiceovers for
the subtopic page content and their translations in different
languages.
written_translations: WrittenTranslations. The text translations of
the subtopic page content. | core/domain/subtopic_page_domain.py | __init__ | 5andeepNambiar/oppia | python | def __init__(self, subtitled_html, recorded_voiceovers, written_translations):
'Constructs a SubtopicPageContents domain object.\n\n Args:\n subtitled_html: SubtitledHtml. The html data being displayed on\n the page.\n recorded_voiceovers: RecordedVoiceovers. The recorded voiceovers for\n the subtopic page content and their translations in different\n languages.\n written_translations: WrittenTranslations. The text translations of\n the subtopic page content.\n '
self.subtitled_html = subtitled_html
self.recorded_voiceovers = recorded_voiceovers
self.written_translations = written_translations |
def validate(self):
'Validates the SubtopicPageContentsObject, verifying that all\n fields are of the correct type.\n '
self.subtitled_html.validate()
content_ids = set([self.subtitled_html.content_id])
self.recorded_voiceovers.validate(content_ids)
self.written_translations.validate(content_ids) | 3,438,324,540,138,235,400 | Validates the SubtopicPageContentsObject, verifying that all
fields are of the correct type. | core/domain/subtopic_page_domain.py | validate | 5andeepNambiar/oppia | python | def validate(self):
'Validates the SubtopicPageContentsObject, verifying that all\n fields are of the correct type.\n '
self.subtitled_html.validate()
content_ids = set([self.subtitled_html.content_id])
self.recorded_voiceovers.validate(content_ids)
self.written_translations.validate(content_ids) |
@classmethod
def create_default_subtopic_page_contents(cls):
'Creates a default subtopic page contents object.\n\n Returns:\n SubtopicPageContents. A default object.\n '
content_id = feconf.DEFAULT_SUBTOPIC_PAGE_CONTENT_ID
return cls(state_domain.SubtitledHtml.create_default_subtitled_html(content_id), state_domain.RecordedVoiceovers.from_dict({'voiceovers_mapping': {content_id: {}}}), state_domain.WrittenTranslations.from_dict({'translations_mapping': {content_id: {}}})) | -2,285,957,106,045,894,000 | Creates a default subtopic page contents object.
Returns:
SubtopicPageContents. A default object. | core/domain/subtopic_page_domain.py | create_default_subtopic_page_contents | 5andeepNambiar/oppia | python | @classmethod
def create_default_subtopic_page_contents(cls):
'Creates a default subtopic page contents object.\n\n Returns:\n SubtopicPageContents. A default object.\n '
content_id = feconf.DEFAULT_SUBTOPIC_PAGE_CONTENT_ID
return cls(state_domain.SubtitledHtml.create_default_subtitled_html(content_id), state_domain.RecordedVoiceovers.from_dict({'voiceovers_mapping': {content_id: {}}}), state_domain.WrittenTranslations.from_dict({'translations_mapping': {content_id: {}}})) |
def to_dict(self):
'Returns a dict representing this SubtopicPageContents domain object.\n\n Returns:\n dict. A dict, mapping all fields of SubtopicPageContents instance.\n '
return {'subtitled_html': self.subtitled_html.to_dict(), 'recorded_voiceovers': self.recorded_voiceovers.to_dict(), 'written_translations': self.written_translations.to_dict()} | -6,016,978,639,589,662,000 | Returns a dict representing this SubtopicPageContents domain object.
Returns:
dict. A dict, mapping all fields of SubtopicPageContents instance. | core/domain/subtopic_page_domain.py | to_dict | 5andeepNambiar/oppia | python | def to_dict(self):
'Returns a dict representing this SubtopicPageContents domain object.\n\n Returns:\n dict. A dict, mapping all fields of SubtopicPageContents instance.\n '
return {'subtitled_html': self.subtitled_html.to_dict(), 'recorded_voiceovers': self.recorded_voiceovers.to_dict(), 'written_translations': self.written_translations.to_dict()} |
@classmethod
def from_dict(cls, page_contents_dict):
'Creates a subtopic page contents object from a dictionary.\n\n Args:\n page_contents_dict: dict. The dict representation of\n SubtopicPageContents object.\n\n Returns:\n SubtopicPageContents. The corresponding object.\n '
page_contents = state_domain.SubtitledHtml.from_dict(page_contents_dict['subtitled_html'])
page_contents.validate()
return cls(page_contents, state_domain.RecordedVoiceovers.from_dict(page_contents_dict['recorded_voiceovers']), state_domain.WrittenTranslations.from_dict(page_contents_dict['written_translations'])) | 1,483,021,645,807,955,700 | Creates a subtopic page contents object from a dictionary.
Args:
page_contents_dict: dict. The dict representation of
SubtopicPageContents object.
Returns:
SubtopicPageContents. The corresponding object. | core/domain/subtopic_page_domain.py | from_dict | 5andeepNambiar/oppia | python | @classmethod
def from_dict(cls, page_contents_dict):
'Creates a subtopic page contents object from a dictionary.\n\n Args:\n page_contents_dict: dict. The dict representation of\n SubtopicPageContents object.\n\n Returns:\n SubtopicPageContents. The corresponding object.\n '
page_contents = state_domain.SubtitledHtml.from_dict(page_contents_dict['subtitled_html'])
page_contents.validate()
return cls(page_contents, state_domain.RecordedVoiceovers.from_dict(page_contents_dict['recorded_voiceovers']), state_domain.WrittenTranslations.from_dict(page_contents_dict['written_translations'])) |
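The SubtopicPageContents records above compose into a simple round trip. A sketch, assuming an Oppia development environment where core.domain.subtopic_page_domain and its dependencies import cleanly:

from core.domain.subtopic_page_domain import SubtopicPageContents

contents = SubtopicPageContents.create_default_subtopic_page_contents()
contents.validate()  # the default contents pass their own type checks

contents_dict = contents.to_dict()
restored = SubtopicPageContents.from_dict(contents_dict)
# to_dict() and from_dict() are inverses for well-formed contents.
assert restored.to_dict() == contents_dict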
def __init__(self, subtopic_page_id, topic_id, page_contents, page_contents_schema_version, language_code, version):
'Constructs a SubtopicPage domain object.\n\n Args:\n subtopic_page_id: str. The unique ID of the subtopic page.\n topic_id: str. The ID of the topic that this subtopic is a part of.\n page_contents: SubtopicPageContents. The html and audio\n translations to be surfaced to the learner.\n page_contents_schema_version: int. The schema version for the page\n contents object.\n language_code: str. The ISO 639-1 code for the language this\n subtopic page is written in.\n version: int. The current version of the subtopic.\n '
self.id = subtopic_page_id
self.topic_id = topic_id
self.page_contents = page_contents
self.page_contents_schema_version = page_contents_schema_version
self.language_code = language_code
self.version = version | 1,706,573,651,113,446,700 | Constructs a SubtopicPage domain object.
Args:
subtopic_page_id: str. The unique ID of the subtopic page.
topic_id: str. The ID of the topic that this subtopic is a part of.
page_contents: SubtopicPageContents. The html and audio
translations to be surfaced to the learner.
page_contents_schema_version: int. The schema version for the page
contents object.
language_code: str. The ISO 639-1 code for the language this
subtopic page is written in.
version: int. The current version of the subtopic. | core/domain/subtopic_page_domain.py | __init__ | 5andeepNambiar/oppia | python | def __init__(self, subtopic_page_id, topic_id, page_contents, page_contents_schema_version, language_code, version):
'Constructs a SubtopicPage domain object.\n\n Args:\n subtopic_page_id: str. The unique ID of the subtopic page.\n topic_id: str. The ID of the topic that this subtopic is a part of.\n page_contents: SubtopicPageContents. The html and audio\n translations to be surfaced to the learner.\n page_contents_schema_version: int. The schema version for the page\n contents object.\n language_code: str. The ISO 639-1 code for the language this\n subtopic page is written in.\n version: int. The current version of the subtopic.\n '
self.id = subtopic_page_id
self.topic_id = topic_id
self.page_contents = page_contents
self.page_contents_schema_version = page_contents_schema_version
self.language_code = language_code
self.version = version |
def to_dict(self):
'Returns a dict representing this SubtopicPage domain object.\n\n Returns:\n dict. A dict, mapping all fields of SubtopicPage instance.\n '
return {'id': self.id, 'topic_id': self.topic_id, 'page_contents': self.page_contents.to_dict(), 'page_contents_schema_version': self.page_contents_schema_version, 'language_code': self.language_code, 'version': self.version} | 1,724,638,480,358,674,200 | Returns a dict representing this SubtopicPage domain object.
Returns:
dict. A dict, mapping all fields of SubtopicPage instance. | core/domain/subtopic_page_domain.py | to_dict | 5andeepNambiar/oppia | python | def to_dict(self):
'Returns a dict representing this SubtopicPage domain object.\n\n Returns:\n dict. A dict, mapping all fields of SubtopicPage instance.\n '
return {'id': self.id, 'topic_id': self.topic_id, 'page_contents': self.page_contents.to_dict(), 'page_contents_schema_version': self.page_contents_schema_version, 'language_code': self.language_code, 'version': self.version} |
@classmethod
def get_subtopic_page_id(cls, topic_id, subtopic_id):
'Returns the subtopic page id from the topic_id and subtopic_id.\n\n Args:\n topic_id: str. The id of the topic that the subtopic is a part of.\n subtopic_id: int. The id of the subtopic.\n\n Returns:\n str. The subtopic_page_id calculated from the given values.\n '
return ('%s-%s' % (topic_id, subtopic_id)) | 7,232,096,110,011,632,000 | Returns the subtopic page id from the topic_id and subtopic_id.
Args:
topic_id: str. The id of the topic that the subtopic is a part of.
subtopic_id: int. The id of the subtopic.
Returns:
str. The subtopic_page_id calculated from the given values. | core/domain/subtopic_page_domain.py | get_subtopic_page_id | 5andeepNambiar/oppia | python | @classmethod
def get_subtopic_page_id(cls, topic_id, subtopic_id):
'Returns the subtopic page id from the topic_id and subtopic_id.\n\n Args:\n topic_id: str. The id of the topic that the subtopic is a part of.\n subtopic_id: int. The id of the subtopic.\n\n Returns:\n str. The subtopic_page_id calculated from the given values.\n '
return ('%s-%s' % (topic_id, subtopic_id)) |
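A worked example of the ID scheme above (topic and subtopic values are illustrative):

from core.domain.subtopic_page_domain import SubtopicPage

# 'topic1' and 3 are illustrative values, not real identifiers.
assert SubtopicPage.get_subtopic_page_id('topic1', 3) == 'topic1-3'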
@classmethod
def create_default_subtopic_page(cls, subtopic_id, topic_id):
'Creates a SubtopicPage object with default values.\n\n Args:\n subtopic_id: str. ID of the subtopic.\n topic_id: str. The Id of the topic to which this page is linked\n with.\n\n Returns:\n SubtopicPage. A subtopic object with given id, topic_id and default\n page contents field.\n '
subtopic_page_id = cls.get_subtopic_page_id(topic_id, subtopic_id)
return cls(subtopic_page_id, topic_id, SubtopicPageContents.create_default_subtopic_page_contents(), feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION, constants.DEFAULT_LANGUAGE_CODE, 0) | -8,186,419,653,497,390,000 | Creates a SubtopicPage object with default values.
Args:
subtopic_id: str. ID of the subtopic.
topic_id: str. The Id of the topic to which this page is linked
with.
Returns:
SubtopicPage. A subtopic object with given id, topic_id and default
page contents field. | core/domain/subtopic_page_domain.py | create_default_subtopic_page | 5andeepNambiar/oppia | python | @classmethod
def create_default_subtopic_page(cls, subtopic_id, topic_id):
'Creates a SubtopicPage object with default values.\n\n Args:\n subtopic_id: str. ID of the subtopic.\n topic_id: str. The Id of the topic to which this page is linked\n with.\n\n Returns:\n SubtopicPage. A subtopic object with given id, topic_id and default\n page contents field.\n '
subtopic_page_id = cls.get_subtopic_page_id(topic_id, subtopic_id)
return cls(subtopic_page_id, topic_id, SubtopicPageContents.create_default_subtopic_page_contents(), feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION, constants.DEFAULT_LANGUAGE_CODE, 0) |
@classmethod
def convert_html_fields_in_subtopic_page_contents(cls, subtopic_page_contents_dict, conversion_fn):
'Applies a conversion function on all the html strings in subtopic\n page contents to migrate them to a desired state.\n\n Args:\n subtopic_page_contents_dict: dict. The dict representation of\n subtopic page contents.\n conversion_fn: function. The conversion function to be applied on\n the subtopic_page_contents_dict.\n\n Returns:\n dict. The converted subtopic_page_contents_dict.\n '
subtopic_page_contents_dict['written_translations'] = state_domain.WrittenTranslations.convert_html_in_written_translations(subtopic_page_contents_dict['written_translations'], conversion_fn)
subtopic_page_contents_dict['subtitled_html']['html'] = conversion_fn(subtopic_page_contents_dict['subtitled_html']['html'])
return subtopic_page_contents_dict | 5,791,851,412,395,655,000 | Applies a conversion function on all the html strings in subtopic
page contents to migrate them to a desired state.
Args:
subtopic_page_contents_dict: dict. The dict representation of
subtopic page contents.
conversion_fn: function. The conversion function to be applied on
the subtopic_page_contents_dict.
Returns:
dict. The converted subtopic_page_contents_dict. | core/domain/subtopic_page_domain.py | convert_html_fields_in_subtopic_page_contents | 5andeepNambiar/oppia | python | @classmethod
def convert_html_fields_in_subtopic_page_contents(cls, subtopic_page_contents_dict, conversion_fn):
'Applies a conversion function on all the html strings in subtopic\n page contents to migrate them to a desired state.\n\n Args:\n subtopic_page_contents_dict: dict. The dict representation of\n subtopic page contents.\n conversion_fn: function. The conversion function to be applied on\n the subtopic_page_contents_dict.\n\n Returns:\n dict. The converted subtopic_page_contents_dict.\n '
subtopic_page_contents_dict['written_translations'] = state_domain.WrittenTranslations.convert_html_in_written_translations(subtopic_page_contents_dict['written_translations'], conversion_fn)
subtopic_page_contents_dict['subtitled_html']['html'] = conversion_fn(subtopic_page_contents_dict['subtitled_html']['html'])
return subtopic_page_contents_dict |
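A sketch of the conversion hook above using a deliberately trivial conversion function; real migrations pass html_validation_service helpers, as the _convert_page_contents_v* records below show. Same Oppia environment assumptions as before.

from core.domain.subtopic_page_domain import (
    SubtopicPage, SubtopicPageContents)

page_contents_dict = (
    SubtopicPageContents.create_default_subtopic_page_contents().to_dict())
# The uppercasing lambda is illustrative; the hook applies it to the html
# in both the subtitled_html and written_translations substructures.
converted = SubtopicPage.convert_html_fields_in_subtopic_page_contents(
    page_contents_dict, lambda html: html.upper())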
@classmethod
def _convert_page_contents_v1_dict_to_v2_dict(cls, page_contents_dict):
'Converts v1 SubtopicPage Contents schema to the v2 schema.\n v2 schema introduces the new schema for Math components.\n\n Args:\n page_contents_dict: dict. A dict used to initialize a SubtopicPage\n domain object.\n\n Returns:\n dict. The converted page_contents_dict.\n '
return cls.convert_html_fields_in_subtopic_page_contents(page_contents_dict, html_validation_service.add_math_content_to_math_rte_components) | 1,325,259,631,131,201,000 | Converts v1 SubtopicPage Contents schema to the v2 schema.
v2 schema introduces the new schema for Math components.
Args:
page_contents_dict: dict. A dict used to initialize a SubtopicPage
domain object.
Returns:
dict. The converted page_contents_dict. | core/domain/subtopic_page_domain.py | _convert_page_contents_v1_dict_to_v2_dict | 5andeepNambiar/oppia | python | @classmethod
def _convert_page_contents_v1_dict_to_v2_dict(cls, page_contents_dict):
'Converts v1 SubtopicPage Contents schema to the v2 schema.\n v2 schema introduces the new schema for Math components.\n\n Args:\n page_contents_dict: dict. A dict used to initialize a SubtopicPage\n domain object.\n\n Returns:\n dict. The converted page_contents_dict.\n '
return cls.convert_html_fields_in_subtopic_page_contents(page_contents_dict, html_validation_service.add_math_content_to_math_rte_components) |
@classmethod
def _convert_page_contents_v2_dict_to_v3_dict(cls, page_contents_dict):
'Converts v2 SubtopicPage Contents schema to the v3 schema.\n v3 schema deprecates oppia-noninteractive-svgdiagram tag and converts\n existing occurrences of it to oppia-noninteractive-image tag.\n\n Args:\n page_contents_dict: dict. A dict used to initialize a SubtopicPage\n domain object.\n\n Returns:\n dict. The converted page_contents_dict.\n '
return cls.convert_html_fields_in_subtopic_page_contents(page_contents_dict, html_validation_service.convert_svg_diagram_tags_to_image_tags) | -5,561,392,608,550,538,000 | Converts v2 SubtopicPage Contents schema to the v3 schema.
v3 schema deprecates oppia-noninteractive-svgdiagram tag and converts
existing occurrences of it to oppia-noninteractive-image tag.
Args:
page_contents_dict: dict. A dict used to initialize a SubtopicPage
domain object.
Returns:
dict. The converted page_contents_dict. | core/domain/subtopic_page_domain.py | _convert_page_contents_v2_dict_to_v3_dict | 5andeepNambiar/oppia | python | @classmethod
def _convert_page_contents_v2_dict_to_v3_dict(cls, page_contents_dict):
'Converts v2 SubtopicPage Contents schema to the v3 schema.\n v3 schema deprecates oppia-noninteractive-svgdiagram tag and converts\n existing occurences of it to oppia-noninteractive-image tag.\n\n Args:\n page_contents_dict: dict. A dict used to initialize a SubtopicPage\n domain object.\n\n Returns:\n dict. The converted page_contents_dict.\n '
return cls.convert_html_fields_in_subtopic_page_contents(page_contents_dict, html_validation_service.convert_svg_diagram_tags_to_image_tags) |
@classmethod
def _convert_page_contents_v3_dict_to_v4_dict(cls, page_contents_dict):
'Converts v3 SubtopicPage Contents schema to the v4 schema.\n v4 schema fixes HTML encoding issues.\n\n Args:\n page_contents_dict: dict. A dict used to initialize a SubtopicPage\n domain object.\n\n Returns:\n dict. The converted page_contents_dict.\n '
return cls.convert_html_fields_in_subtopic_page_contents(page_contents_dict, html_validation_service.fix_incorrectly_encoded_chars) | -3,680,578,198,243,071,000 | Converts v3 SubtopicPage Contents schema to the v4 schema.
v4 schema fixes HTML encoding issues.
Args:
page_contents_dict: dict. A dict used to initialize a SubtopicPage
domain object.
Returns:
dict. The converted page_contents_dict. | core/domain/subtopic_page_domain.py | _convert_page_contents_v3_dict_to_v4_dict | 5andeepNambiar/oppia | python | @classmethod
def _convert_page_contents_v3_dict_to_v4_dict(cls, page_contents_dict):
'Converts v3 SubtopicPage Contents schema to the v4 schema.\n v4 schema fixes HTML encoding issues.\n\n Args:\n page_contents_dict: dict. A dict used to initialize a SubtopicPage\n domain object.\n\n Returns:\n dict. The converted page_contents_dict.\n '
return cls.convert_html_fields_in_subtopic_page_contents(page_contents_dict, html_validation_service.fix_incorrectly_encoded_chars) |
@classmethod
def update_page_contents_from_model(cls, versioned_page_contents, current_version):
'Converts the page_contents blob contained in the given\n versioned_page_contents dict from current_version to\n current_version + 1. Note that the versioned_page_contents being\n passed in is modified in-place.\n\n Args:\n versioned_page_contents: dict. A dict with two keys:\n - schema_version: str. The schema version for the\n page_contents dict.\n - page_contents: dict. The dict comprising the subtopic page\n contents.\n current_version: int. The current schema version of page_contents.\n '
versioned_page_contents['schema_version'] = (current_version + 1)
conversion_fn = getattr(cls, ('_convert_page_contents_v%s_dict_to_v%s_dict' % (current_version, (current_version + 1))))
versioned_page_contents['page_contents'] = conversion_fn(versioned_page_contents['page_contents']) | -918,806,388,972,543,200 | Converts the page_contents blob contained in the given
versioned_page_contents dict from current_version to
current_version + 1. Note that the versioned_page_contents being
passed in is modified in-place.
Args:
versioned_page_contents: dict. A dict with two keys:
- schema_version: str. The schema version for the
page_contents dict.
- page_contents: dict. The dict comprising the subtopic page
contents.
current_version: int. The current schema version of page_contents. | core/domain/subtopic_page_domain.py | update_page_contents_from_model | 5andeepNambiar/oppia | python | @classmethod
def update_page_contents_from_model(cls, versioned_page_contents, current_version):
'Converts the page_contents blob contained in the given\n versioned_page_contents dict from current_version to\n current_version + 1. Note that the versioned_page_contents being\n passed in is modified in-place.\n\n Args:\n versioned_page_contents: dict. A dict with two keys:\n - schema_version: str. The schema version for the\n page_contents dict.\n - page_contents: dict. The dict comprising the subtopic page\n contents.\n current_version: int. The current schema version of page_contents.\n '
versioned_page_contents['schema_version'] = (current_version + 1)
conversion_fn = getattr(cls, ('_convert_page_contents_v%s_dict_to_v%s_dict' % (current_version, (current_version + 1))))
versioned_page_contents['page_contents'] = conversion_fn(versioned_page_contents['page_contents']) |
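A hedged sketch of the caller-side migration loop this method is designed for; the loop is modelled on the docstring's in-place convention and is not code from this record.

import feconf  # Oppia's root-level config module

from core.domain.subtopic_page_domain import SubtopicPage

def migrate_to_current_schema(versioned_page_contents):
    # Upgrade one schema version at a time until the blob is current.
    # update_page_contents_from_model mutates the dict in place.
    while (versioned_page_contents['schema_version'] <
           feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION):
        SubtopicPage.update_page_contents_from_model(
            versioned_page_contents,
            versioned_page_contents['schema_version'])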
def get_subtopic_id_from_subtopic_page_id(self):
'Returns the id from the subtopic page id of the object.\n\n Returns:\n int. The subtopic_id of the object.\n '
return int(self.id[(len(self.topic_id) + 1):]) | -8,457,681,005,800,905,000 | Returns the id from the subtopic page id of the object.
Returns:
int. The subtopic_id of the object. | core/domain/subtopic_page_domain.py | get_subtopic_id_from_subtopic_page_id | 5andeepNambiar/oppia | python | def get_subtopic_id_from_subtopic_page_id(self):
'Returns the id from the subtopic page id of the object.\n\n Returns:\n int. The subtopic_id of the object.\n '
return int(self.id[(len(self.topic_id) + 1):]) |
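The slice above drops the topic-ID prefix plus the joining hyphen and parses the remainder as an int. A quick check (identifiers illustrative):

from core.domain.subtopic_page_domain import SubtopicPage

page = SubtopicPage.create_default_subtopic_page(3, 'topic1')
# page.id is 'topic1-3'; stripping 'topic1-' leaves the subtopic id.
assert page.get_subtopic_id_from_subtopic_page_id() == 3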
def update_page_contents_html(self, new_page_contents_html):
'The new value for the html data field.\n\n Args:\n new_page_contents_html: SubtitledHtml. The new html for the subtopic\n page.\n '
self.page_contents.subtitled_html = new_page_contents_html | -1,721,253,086,845,089,000 | The new value for the html data field.
Args:
new_page_contents_html: SubtitledHtml. The new html for the subtopic
page. | core/domain/subtopic_page_domain.py | update_page_contents_html | 5andeepNambiar/oppia | python | def update_page_contents_html(self, new_page_contents_html):
'The new value for the html data field.\n\n Args:\n new_page_contents_html: SubtitledHtml. The new html for the subtopic\n page.\n '
self.page_contents.subtitled_html = new_page_contents_html |
def update_page_contents_audio(self, new_page_contents_audio):
'The new value for the recorded_voiceovers data field.\n\n Args:\n new_page_contents_audio: RecordedVoiceovers. The new audio for\n the subtopic page.\n '
self.page_contents.recorded_voiceovers = new_page_contents_audio | -3,496,032,183,837,896,700 | The new value for the recorded_voiceovers data field.
Args:
new_page_contents_audio: RecordedVoiceovers. The new audio for
the subtopic page. | core/domain/subtopic_page_domain.py | update_page_contents_audio | 5andeepNambiar/oppia | python | def update_page_contents_audio(self, new_page_contents_audio):
'The new value for the recorded_voiceovers data field.\n\n Args:\n new_page_contents_audio: RecordedVoiceovers. The new audio for\n the subtopic page.\n '
self.page_contents.recorded_voiceovers = new_page_contents_audio |
def update_page_contents_written_translations(self, new_page_written_translations_dict):
'The new value for the written_translations data field.\n\n Args:\n new_page_written_translations_dict: dict. The new translation for\n the subtopic page.\n '
self.page_contents.written_translations = state_domain.WrittenTranslations.from_dict(new_page_written_translations_dict) | -7,942,884,331,211,655,000 | The new value for the written_translations data field.
Args:
new_page_written_translations_dict: dict. The new translation for
the subtopic page. | core/domain/subtopic_page_domain.py | update_page_contents_written_translations | 5andeepNambiar/oppia | python | def update_page_contents_written_translations(self, new_page_written_translations_dict):
'The new value for the written_translations data field.\n\n Args:\n new_page_written_translations_dict: dict. The new translation for\n the subtopic page.\n '
self.page_contents.written_translations = state_domain.WrittenTranslations.from_dict(new_page_written_translations_dict) |
def validate(self):
'Validates various properties of the SubtopicPage object.\n\n Raises:\n ValidationError. One or more attributes of the subtopic page are\n invalid.\n '
if (not isinstance(self.topic_id, python_utils.BASESTRING)):
raise utils.ValidationError(('Expected topic_id to be a string, received %s' % self.topic_id))
if (not isinstance(self.version, int)):
raise utils.ValidationError(('Expected version number to be an int, received %s' % self.version))
self.page_contents.validate()
if (not isinstance(self.page_contents_schema_version, int)):
raise utils.ValidationError(('Expected page contents schema version to be an integer, received %s' % self.page_contents_schema_version))
if (self.page_contents_schema_version != feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION):
raise utils.ValidationError(('Expected page contents schema version to be %s, received %s' % (feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION, self.page_contents_schema_version)))
if (not isinstance(self.language_code, python_utils.BASESTRING)):
raise utils.ValidationError(('Expected language code to be a string, received %s' % self.language_code))
if (not any(((self.language_code == lc['code']) for lc in constants.SUPPORTED_CONTENT_LANGUAGES))):
raise utils.ValidationError(('Invalid language code: %s' % self.language_code)) | -8,402,969,171,900,102,000 | Validates various properties of the SubtopicPage object.
Raises:
ValidationError. One or more attributes of the subtopic page are
invalid. | core/domain/subtopic_page_domain.py | validate | 5andeepNambiar/oppia | python | def validate(self):
'Validates various properties of the SubtopicPage object.\n\n Raises:\n ValidationError. One or more attributes of the subtopic page are\n invalid.\n '
if (not isinstance(self.topic_id, python_utils.BASESTRING)):
raise utils.ValidationError(('Expected topic_id to be a string, received %s' % self.topic_id))
if (not isinstance(self.version, int)):
raise utils.ValidationError(('Expected version number to be an int, received %s' % self.version))
self.page_contents.validate()
if (not isinstance(self.page_contents_schema_version, int)):
raise utils.ValidationError(('Expected page contents schema version to be an integer, received %s' % self.page_contents_schema_version))
if (self.page_contents_schema_version != feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION):
raise utils.ValidationError(('Expected page contents schema version to be %s, received %s' % (feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION, self.page_contents_schema_version)))
if (not isinstance(self.language_code, python_utils.BASESTRING)):
raise utils.ValidationError(('Expected language code to be a string, received %s' % self.language_code))
if (not any(((self.language_code == lc['code']) for lc in constants.SUPPORTED_CONTENT_LANGUAGES))):
raise utils.ValidationError(('Invalid language code: %s' % self.language_code)) |
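A sketch of the failure path: setting a non-integer version should raise utils.ValidationError with the message built in the record above (utils here is Oppia's root-level utils module).

import utils  # Oppia's root-level utils module

from core.domain.subtopic_page_domain import SubtopicPage

page = SubtopicPage.create_default_subtopic_page(3, 'topic1')
page.version = 'not-an-int'
try:
    page.validate()
except utils.ValidationError as e:
    # "Expected version number to be an int, received not-an-int"
    print(e)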
def _benchmark_create_tensor(self, value, dtype, device):
'Benchmark overheads of creating a Tensor object.'
ctx = context.context()
handle = ctx._handle
if (device == GPU):
ops.EagerTensor(value, context=handle, device=device)
def func():
ops.EagerTensor(value, context=handle, device=device, dtype=dtype)
self._run(func, 30000) | -5,017,158,884,690,301,000 | Benchmark overheads of creating a Tensor object. | tensorflow/python/eager/benchmarks_test.py | _benchmark_create_tensor | AishwaryaVarma/tensorflow | python | def _benchmark_create_tensor(self, value, dtype, device):
ctx = context.context()
handle = ctx._handle
if (device == GPU):
ops.EagerTensor(value, context=handle, device=device)
def func():
ops.EagerTensor(value, context=handle, device=device, dtype=dtype)
self._run(func, 30000) |
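The benchmark above times raw EagerTensor construction against a private context handle, bypassing the public convert_to_tensor path. A hedged sketch of the same measurement without the benchmark harness; the module paths follow the record's style, and ctx._handle is private API that may change between TensorFlow versions.

import time

from tensorflow.python.eager import context
from tensorflow.python.framework import ops

def time_eager_tensor_creation(value, dtype, device, iters=30000):
    ctx = context.context()
    handle = ctx._handle  # private handle, as in the benchmark above
    start = time.time()
    for _ in range(iters):
        ops.EagerTensor(value, context=handle, device=device, dtype=dtype)
    return (time.time() - start) / iters  # average seconds per tensor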
def __init__(self, connection):
'Constructor for class RootServiceImpl, from RootService\n\n :param connection: The connection data\n '
super().__init__(connection) | -1,078,980,048,766,393,100 | Constructor for class RootServiceImpl, from RootService
:param connection: The connection data | pyopenproject/business/services/root_service_impl.py | __init__ | Flying-Free/pyopenproject | python | def __init__(self, connection):
'Constructor for class RootServiceImpl, from RootService\n\n :param connection: The connection data\n '
super().__init__(connection) |
def create_text_id_annotations(text_id_annotation_request=None):
'Annotate IDs in a clinical note\n\n Return the ID annotations found in a clinical note # noqa: E501\n\n :param text_id_annotation_request:\n :type text_id_annotation_request: dict | bytes\n\n :rtype: TextIdAnnotationResponse\n '
if connexion.request.is_json:
try:
annotation_request = TextIdAnnotationRequest.from_dict(connexion.request.get_json())
note = annotation_request.note
annotations = get_annotations(note, phi_type=PhiType.ID)
res = TextIdAnnotationResponse(annotations)
status = 200
except Exception as error:
status = 500
res = Error('Internal error', status, str(error))
return (res, status) | 5,121,373,807,043,872,000 | Annotate IDs in a clinical note
Return the ID annotations found in a clinical note # noqa: E501
:param text_id_annotation_request:
:type text_id_annotation_request: dict | bytes
:rtype: TextIdAnnotationResponse | server/openapi_server/controllers/text_id_annotation_controller.py | create_text_id_annotations | cascadianblue/phi-annotator | python | def create_text_id_annotations(text_id_annotation_request=None):
'Annotate IDs in a clinical note\n\n Return the ID annotations found in a clinical note # noqa: E501\n\n :param text_id_annotation_request:\n :type text_id_annotation_request: dict | bytes\n\n :rtype: TextIdAnnotationResponse\n '
if connexion.request.is_json:
try:
annotation_request = TextIdAnnotationRequest.from_dict(connexion.request.get_json())
note = annotation_request.note
annotations = get_annotations(note, phi_type=PhiType.ID)
res = TextIdAnnotationResponse(annotations)
status = 200
except Exception as error:
status = 500
res = Error('Internal error', status, str(error))
return (res, status) |
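A hedged sketch of exercising the connexion handler above with a test client. The route path and the note payload shape are assumptions for illustration; the real values come from the service's OpenAPI spec, which this record does not include.

def post_example_note(app):
    # 'app' is the server's connexion application object (its construction
    # is not shown in this record); the route is an assumed path.
    payload = {'note': {'text': 'Patient MRN 1234567 seen on 2020-01-01.'}}
    with app.app.test_client() as client:
        response = client.post('/api/v1/textIdAnnotations', json=payload)
        # 200 with a TextIdAnnotationResponse body on success; 500 with an
        # Error body if annotation fails, per the handler above.
        return response.status_code, response.get_json()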
def _handle_exception(self, exception):
'Called within an except block to allow converting exceptions\n to arbitrary responses. Anything returned (except None) will\n be used as response.'
try:
return super(WebSocketRpcRequest, self)._handle_exception(exception)
except Exception:
if (not isinstance(exception, (odoo.exceptions.Warning, odoo.http.SessionExpiredException, odoo.exceptions.except_orm))):
_logger.exception('Exception during JSON request handling.')
error = {'code': 200, 'message': 'Odoo Server Error', 'data': odoo.http.serialize_exception(exception)}
if isinstance(exception, odoo.http.AuthenticationError):
error['code'] = 100
error['message'] = 'Odoo Session Invalid'
if isinstance(exception, odoo.http.SessionExpiredException):
error['code'] = 100
error['message'] = 'Odoo Session Expired'
return self._json_response(error=error) | -3,700,882,412,083,221,000 | Called within an except block to allow converting exceptions
to arbitrary responses. Anything returned (except None) will
be used as response. | odooku/services/websocket/requests.py | _handle_exception | 12thmar/marodooku | python | def _handle_exception(self, exception):
'Called within an except block to allow converting exceptions\n to arbitrary responses. Anything returned (except None) will\n be used as response.'
try:
return super(WebSocketRpcRequest, self)._handle_exception(exception)
except Exception:
if (not isinstance(exception, (odoo.exceptions.Warning, odoo.http.SessionExpiredException, odoo.exceptions.except_orm))):
_logger.exception('Exception during JSON request handling.')
error = {'code': 200, 'message': 'Odoo Server Error', 'data': odoo.http.serialize_exception(exception)}
if isinstance(exception, odoo.http.AuthenticationError):
error['code'] = 100
error['message'] = 'Odoo Session Invalid'
if isinstance(exception, odoo.http.SessionExpiredException):
error['code'] = 100
error['message'] = 'Odoo Session Expired'
return self._json_response(error=error) |
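For reference, the error payload assembled above takes this shape for an expired session; the data sub-dict comes from odoo.http.serialize_exception and is abbreviated with illustrative contents.

error = {
    'code': 100,
    'message': 'Odoo Session Expired',
    'data': {
        # serialize_exception output, abbreviated for illustration
        'name': 'odoo.http.SessionExpiredException',
        'debug': '...traceback text...',
    },
}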
def get_change_address(node):
'Get a wallet change address.\n\n There is no wallet RPC to access unused change addresses, so this creates a\n dummy transaction, calls fundrawtransaction to add an input and change\n output, then returns the change address.'
dest_address = node.getnewaddress()
dest_amount = Decimal('0.012345')
rawtx = node.createrawtransaction([], {dest_address: dest_amount})
fundtx = node.fundrawtransaction(rawtx)
info = node.decoderawtransaction(fundtx['hex'])
return next((address for out in info['vout'] if (out['value'] != dest_amount) for address in out['scriptPubKey']['addresses'])) | -6,720,695,055,454,696,000 | Get a wallet change address.
There is no wallet RPC to access unused change addresses, so this creates a
dummy transaction, calls fundrawtransaction to add an input and change
output, then returns the change address. | qa/rpc-tests/bumpfee.py | get_change_address | citrixrep/rec | python | def get_change_address(node):
'Get a wallet change address.\n\n There is no wallet RPC to access unused change addresses, so this creates a\n dummy transaction, calls fundrawtransaction to add an input and change\n output, then returns the change address.'
dest_address = node.getnewaddress()
dest_amount = Decimal('0.012345')
rawtx = node.createrawtransaction([], {dest_address: dest_amount})
fundtx = node.fundrawtransaction(rawtx)
info = node.decoderawtransaction(fundtx['hex'])
return next((address for out in info['vout'] if (out['value'] != dest_amount) for address in out['scriptPubKey']['addresses'])) |
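The closing generator expression does the real work: any output whose value differs from the destination amount is taken to be the change output. A self-contained sketch of the same selection logic over a decoded-transaction dict (addresses and amounts illustrative):

from decimal import Decimal

def find_change_addresses(decoded_tx, dest_amount):
    # Mirror of the selection above: every output that is not the
    # destination payment is treated as change.
    return [address
            for out in decoded_tx['vout']
            if out['value'] != dest_amount
            for address in out['scriptPubKey']['addresses']]

# Illustrative decoded transaction: one payment output, one change output.
decoded = {'vout': [
    {'value': Decimal('0.012345'),
     'scriptPubKey': {'addresses': ['dest-addr']}},
    {'value': Decimal('0.987000'),
     'scriptPubKey': {'addresses': ['change-addr']}},
]}
assert find_change_addresses(decoded, Decimal('0.012345')) == ['change-addr']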
def _generate_immediate_neighbours(pattern: str) -> list:
'\n Generate immediate (different by one mismatch) neighbours of the given genome pattern\n :param pattern: a pattern to examine\n :return: neighbourhood, NOT including the given pattern\n '
generated = []
for i in range(len(pattern)):
if (pattern[i] == 'A'):
generated.extend([((pattern[:i] + c) + pattern[(i + 1):]) for c in LIST_A])
elif (pattern[i] == 'C'):
generated.extend([((pattern[:i] + c) + pattern[(i + 1):]) for c in LIST_C])
elif (pattern[i] == 'T'):
generated.extend([((pattern[:i] + c) + pattern[(i + 1):]) for c in LIST_T])
elif (pattern[i] == 'G'):
generated.extend([((pattern[:i] + c) + pattern[(i + 1):]) for c in LIST_G])
return generated | -1,175,868,181,967,810,000 | Generate immediate (different by one mismatch) neighbours of the given genome pattern
:param pattern: a pattern to examine
:return: neighbourhood, NOT including the given pattern | hw1/approximate_occurrences.py | _generate_immediate_neighbours | leskin-in/mipt-bioalgo | python | def _generate_immediate_neighbours(pattern: str) -> list:
'\n Generate immediate (different by one mismatch) neighbours of the given genome pattern\n :param pattern: a pattern to examine\n :return: neighbourhood, NOT including the given pattern\n '
generated = []
for i in range(len(pattern)):
if (pattern[i] == 'A'):
generated.extend([((pattern[:i] + c) + pattern[(i + 1):]) for c in LIST_A])
elif (pattern[i] == 'C'):
generated.extend([((pattern[:i] + c) + pattern[(i + 1):]) for c in LIST_C])
elif (pattern[i] == 'T'):
generated.extend([((pattern[:i] + c) + pattern[(i + 1):]) for c in LIST_T])
elif (pattern[i] == 'G'):
generated.extend([((pattern[:i] + c) + pattern[(i + 1):]) for c in LIST_G])
return generated |
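Each position contributes the three alternative bases, so a length-L pattern has exactly 3L immediate neighbours, none equal to the pattern itself. A quick check, assuming the module and its LIST_A/LIST_C/LIST_T/LIST_G substitution tables are importable:

from approximate_occurrences import _generate_immediate_neighbours

near = _generate_immediate_neighbours('AC')
assert len(near) == 3 * 2  # three substitutions at each of two positions
assert 'AC' not in near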
def generate_neighbours(pattern: str, mismatches: int) -> set:
"\n Generate neighbours for the given pattern (genome string)\n :param pattern: genome pattern\n :param mismatches: number of mismatches to generate neighbours\n :return: a set of patterns in the neighbourhood, including the 'pattern' itself\n "
neighbourhood = set()
neighbourhood.add(pattern)
curr_patterns = [pattern]
next_patterns = []
for curr_mismatches in range(mismatches):
for curr_pattern in curr_patterns:
for neighbour in _generate_immediate_neighbours(curr_pattern):
if (neighbour not in neighbourhood):
neighbourhood.add(neighbour)
next_patterns.append(neighbour)
curr_patterns = next_patterns
next_patterns = []
return neighbourhood | 6,452,492,989,575,117,000 | Generate neighbours for the given pattern (genome string)
:param pattern: genome pattern
:param mismatches: number of mismatches to generate neighbours
:return: a set of patterns in the neighbourhood, including the 'pattern' itself | hw1/approximate_occurrences.py | generate_neighbours | leskin-in/mipt-bioalgo | python | def generate_neighbours(pattern: str, mismatches: int) -> set:
"\n Generate neighbours for the given pattern (genome string)\n :param pattern: genome pattern\n :param mismatches: number of mismatches to generate neighbours\n :return: a set of patterns in the neighbourhood, including the 'pattern' itself\n "
neighbourhood = set()
neighbourhood.add(pattern)
curr_patterns = [pattern]
next_patterns = []
for curr_mismatches in range(mismatches):
for curr_pattern in curr_patterns:
for neighbour in _generate_immediate_neighbours(curr_pattern):
if (neighbour not in neighbourhood):
neighbourhood.add(neighbour)
next_patterns.append(neighbour)
curr_patterns = next_patterns
next_patterns = []
return neighbourhood |
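Since generate_neighbours also includes the pattern itself, the 1-mismatch neighbourhood of a length-L pattern has 3L + 1 members. A worked check under the same import assumptions as above:

from approximate_occurrences import generate_neighbours

neighbours = generate_neighbours('ACGT', 1)
assert len(neighbours) == 3 * 4 + 1  # 12 substitutions plus the pattern
assert 'ACGT' in neighbours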
@property
@abstractmethod
def label_type(self):
'Each model predicts labels of a certain type. TODO: can we find a better name for this?'
raise NotImplementedError | 5,872,135,688,701,695,000 | Each model predicts labels of a certain type. TODO: can we find a better name for this? | flair/nn/model.py | label_type | MaxDall/flair | python | @property
@abstractmethod
def label_type(self):
raise NotImplementedError |
@abstractmethod
def forward_loss(self, data_points: Union[(List[DataPoint], DataPoint)]) -> torch.tensor:
'Performs a forward pass and returns a loss tensor for backpropagation. Implement this to enable training.'
raise NotImplementedError | 7,063,702,480,755,870,000 | Performs a forward pass and returns a loss tensor for backpropagation. Implement this to enable training. | flair/nn/model.py | forward_loss | MaxDall/flair | python | @abstractmethod
def forward_loss(self, data_points: Union[(List[DataPoint], DataPoint)]) -> torch.tensor:
raise NotImplementedError |
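A hedged sketch of what the two abstract members above require of a subclass. The base-class name Model is inferred from the file path flair/nn/model.py, and real flair models implement further abstract methods (evaluation, serialization) that are omitted here, so this class illustrates the contract only.

import torch

from flair.nn.model import Model  # class name inferred from the file path

class ToyModel(Model):
    @property
    def label_type(self):
        return 'toy_label'  # illustrative label-type name

    def forward_loss(self, data_points):
        # Placeholder loss that merely satisfies the interface; a real
        # model would embed data_points and compute a task-specific loss.
        return torch.zeros(1, requires_grad=True)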