Dataset schema (column: type, observed range):
- body: string, lengths 26 to 98.2k
- body_hash: int64, min -9,222,864,604,528,158,000, max 9,221,803,474B
- docstring: string, lengths 1 to 16.8k
- path: string, lengths 5 to 230
- name: string, lengths 1 to 96
- repository_name: string, lengths 7 to 89
- lang: string, 1 distinct value
- body_without_docstring: string, lengths 20 to 98.2k
def _pretty_print_target(self, target): '\n Make target printouts more user-friendly.\n 1) builtins will be printed as `builtins.xyz`\n 2) operators will be printed as `operator.xyz`\n 3) other callables will be printed with qualified name, e.g. torch.add\n ' if isinstance(target, str): return target if hasattr(target, '__module__'): if (not hasattr(target, '__name__')): return _get_qualified_name(target) if (target.__module__ == 'builtins'): return f'builtins.{target.__name__}' elif (target.__module__ == '_operator'): return f'operator.{target.__name__}' return _get_qualified_name(target)
-4,326,301,714,006,745,600
Make target printouts more user-friendly. 1) builtins will be printed as `builtins.xyz` 2) operators will be printed as `operator.xyz` 3) other callables will be printed with qualified name, e.g. torch.add
venv/Lib/site-packages/torch/fx/node.py
_pretty_print_target
Westlanderz/AI-Plat1
python
def _pretty_print_target(self, target): '\n Make target printouts more user-friendly.\n 1) builtins will be printed as `builtins.xyz`\n 2) operators will be printed as `operator.xyz`\n 3) other callables will be printed with qualified name, e.g. torch.add\n ' if isinstance(target, str): return target if hasattr(target, '__module__'): if (not hasattr(target, '__name__')): return _get_qualified_name(target) if (target.__module__ == 'builtins'): return f'builtins.{target.__name__}' elif (target.__module__ == '_operator'): return f'operator.{target.__name__}' return _get_qualified_name(target)
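A minimal sketch of the three classification branches above: `len` lives in `builtins`, `operator.add` reports `_operator` as its module, and anything else falls through to `_get_qualified_name`. The loop only probes the two attributes the method checks.

import operator

# Expected output:
#   builtins len      -> printed as builtins.len
#   _operator add     -> printed as operator.add
# anything else falls through to its fully qualified name
for target in (len, operator.add):
    print(getattr(target, '__module__', None), target.__name__)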
@compatibility(is_backward_compatible=True) def format_node(self, placeholder_names: List[str]=None, maybe_return_typename: List[str]=None) -> Optional[str]: "\n Return a descriptive string representation of ``self``.\n\n This method can be used with no arguments as a debugging\n utility.\n\n This function is also used internally in the ``__str__`` method\n of ``Graph``. Together, the strings in ``placeholder_names``\n and ``maybe_return_typename`` make up the signature of the\n autogenerated ``forward`` function in this Graph's surrounding\n GraphModule. ``placeholder_names`` and ``maybe_return_typename``\n should not be used otherwise.\n\n Args:\n placeholder_names: A list that will store formatted strings\n representing the placeholders in the generated\n ``forward`` function. Internal use only.\n maybe_return_typename: A single-element list that will store\n a formatted string representing the output of the\n generated ``forward`` function. Internal use only.\n\n Returns:\n str: If 1) we're using ``format_node`` as an internal helper\n in the ``__str__`` method of ``Graph``, and 2) ``self``\n is a placeholder Node, return ``None``. Otherwise,\n return a descriptive string representation of the\n current Node.\n " if (self.op == 'placeholder'): assert isinstance(self.target, str) arg_str = self.target arg_str += ((arg_str + f': {_type_repr(self.type)}') if self.type else '') if placeholder_names: placeholder_names.append(arg_str) return None maybe_typename = (f'{_type_repr(self.type)} ' if self.type else '') default_val = ((('(default=' + str(self.args[0])) + ')') if self.args else '') return f'%{self.name} : {maybe_typename}[#users={len(self.users)}] = {self.op}[target={self.target}]{default_val}' elif (self.op == 'get_attr'): maybe_typename = (f'{_type_repr(self.type)} ' if (self.type is not None) else '') return f'%{self.name} : {maybe_typename}[#users={len(self.users)}] = {self.op}[target={self._pretty_print_target(self.target)}]' elif (self.op == 'output'): if (self.type and maybe_return_typename): maybe_return_typename[0] = f' -> {_type_repr(self.type)}' return f'return {self.args[0]}' else: maybe_typename = (f'{_type_repr(self.type)} ' if (self.type is not None) else '') return f'%{self.name} : {maybe_typename}[#users={len(self.users)}] = {self.op}[target={self._pretty_print_target(self.target)}](args = {_format_arg(self.args)}, kwargs = {_format_arg(self.kwargs)})'
2,380,465,824,610,337,000
Return a descriptive string representation of ``self``. This method can be used with no arguments as a debugging utility. This function is also used internally in the ``__str__`` method of ``Graph``. Together, the strings in ``placeholder_names`` and ``maybe_return_typename`` make up the signature of the autogenerated ``forward`` function in this Graph's surrounding GraphModule. ``placeholder_names`` and ``maybe_return_typename`` should not be used otherwise. Args: placeholder_names: A list that will store formatted strings representing the placeholders in the generated ``forward`` function. Internal use only. maybe_return_typename: A single-element list that will store a formatted string representing the output of the generated ``forward`` function. Internal use only. Returns: str: If 1) we're using ``format_node`` as an internal helper in the ``__str__`` method of ``Graph``, and 2) ``self`` is a placeholder Node, return ``None``. Otherwise, return a descriptive string representation of the current Node.
venv/Lib/site-packages/torch/fx/node.py
format_node
Westlanderz/AI-Plat1
python
@compatibility(is_backward_compatible=True) def format_node(self, placeholder_names: List[str]=None, maybe_return_typename: List[str]=None) -> Optional[str]: "\n Return a descriptive string representation of ``self``.\n\n This method can be used with no arguments as a debugging\n utility.\n\n This function is also used internally in the ``__str__`` method\n of ``Graph``. Together, the strings in ``placeholder_names``\n and ``maybe_return_typename`` make up the signature of the\n autogenerated ``forward`` function in this Graph's surrounding\n GraphModule. ``placeholder_names`` and ``maybe_return_typename``\n should not be used otherwise.\n\n Args:\n placeholder_names: A list that will store formatted strings\n representing the placeholders in the generated\n ``forward`` function. Internal use only.\n maybe_return_typename: A single-element list that will store\n a formatted string representing the output of the\n generated ``forward`` function. Internal use only.\n\n Returns:\n str: If 1) we're using ``format_node`` as an internal helper\n in the ``__str__`` method of ``Graph``, and 2) ``self``\n is a placeholder Node, return ``None``. Otherwise,\n return a descriptive string representation of the\n current Node.\n " if (self.op == 'placeholder'): assert isinstance(self.target, str) arg_str = self.target arg_str += ((arg_str + f': {_type_repr(self.type)}') if self.type else '') if placeholder_names: placeholder_names.append(arg_str) return None maybe_typename = (f'{_type_repr(self.type)} ' if self.type else '') default_val = ((('(default=' + str(self.args[0])) + ')') if self.args else '') return f'%{self.name} : {maybe_typename}[#users={len(self.users)}] = {self.op}[target={self.target}]{default_val}' elif (self.op == 'get_attr'): maybe_typename = (f'{_type_repr(self.type)} ' if (self.type is not None) else '') return f'%{self.name} : {maybe_typename}[#users={len(self.users)}] = {self.op}[target={self._pretty_print_target(self.target)}]' elif (self.op == 'output'): if (self.type and maybe_return_typename): maybe_return_typename[0] = f' -> {_type_repr(self.type)}' return f'return {self.args[0]}' else: maybe_typename = (f'{_type_repr(self.type)} ' if (self.type is not None) else '') return f'%{self.name} : {maybe_typename}[#users={len(self.users)}] = {self.op}[target={self._pretty_print_target(self.target)}](args = {_format_arg(self.args)}, kwargs = {_format_arg(self.kwargs)})'
@compatibility(is_backward_compatible=True) def replace_all_uses_with(self, replace_with: 'Node') -> List['Node']: '\n Replace all uses of ``self`` in the Graph with the Node ``replace_with``.\n\n Args:\n\n replace_with (Node): The node to replace all uses of ``self`` with.\n\n Returns:\n\n The list of Nodes on which this change was made.\n ' to_process = list(self.users) for use_node in to_process: def maybe_replace_node(n: Node) -> Node: if (n == self): return replace_with else: return n new_args = map_arg(use_node.args, maybe_replace_node) new_kwargs = map_arg(use_node.kwargs, maybe_replace_node) assert isinstance(new_args, tuple) assert isinstance(new_kwargs, dict) use_node.__update_args_kwargs(new_args, new_kwargs) assert (len(self.users) == 0) return to_process
-1,574,887,777,613,111,800
Replace all uses of ``self`` in the Graph with the Node ``replace_with``. Args: replace_with (Node): The node to replace all uses of ``self`` with. Returns: The list of Nodes on which this change was made.
venv/Lib/site-packages/torch/fx/node.py
replace_all_uses_with
Westlanderz/AI-Plat1
python
@compatibility(is_backward_compatible=True) def replace_all_uses_with(self, replace_with: 'Node') -> List['Node']: '\n Replace all uses of ``self`` in the Graph with the Node ``replace_with``.\n\n Args:\n\n replace_with (Node): The node to replace all uses of ``self`` with.\n\n Returns:\n\n The list of Nodes on which this change was made.\n ' to_process = list(self.users) for use_node in to_process: def maybe_replace_node(n: Node) -> Node: if (n == self): return replace_with else: return n new_args = map_arg(use_node.args, maybe_replace_node) new_kwargs = map_arg(use_node.kwargs, maybe_replace_node) assert isinstance(new_args, tuple) assert isinstance(new_kwargs, dict) use_node.__update_args_kwargs(new_args, new_kwargs) assert (len(self.users) == 0) return to_process
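A hedged sketch of the canonical use of `replace_all_uses_with` in an FX rewrite pass: create a replacement node, repoint every user at it, then erase the original. The trailing `assert len(self.users) == 0` above is exactly what makes the erase safe. The toy module is illustrative.

import torch
import torch.fx

class M(torch.nn.Module):
    def forward(self, x):
        return torch.add(x, x)

gm = torch.fx.symbolic_trace(M())
for node in list(gm.graph.nodes):
    if node.op == 'call_function' and node.target is torch.add:
        with gm.graph.inserting_after(node):
            new_node = gm.graph.call_function(torch.mul, node.args, node.kwargs)
        node.replace_all_uses_with(new_node)  # repoint every user of `node`
        gm.graph.erase_node(node)             # safe: node now has zero users
gm.recompile()
print(gm(torch.ones(2)))  # tensor([1., 1.]), since 1*1 replaced 1+1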
@compatibility(is_backward_compatible=False) def is_impure(self): '\n Returns whether this op is impure, i.e. if its op is a placeholder or\n output, or if a call_function or call_module which is impure.\n\n Returns:\n\n bool: If the op is impure or not.\n ' if (self.op in {'placeholder', 'output'}): return True if (self.op == 'call_function'): return (self.target in _side_effectful_functions) if (self.op == 'call_module'): assert (self.graph.owning_module is not None), 'self.graph.owning_module not set for purity check' target_mod = self.graph.owning_module.get_submodule(self.target) assert (target_mod is not None), f'Did not find expected submodule target {self.target}' return getattr(target_mod, '_is_impure', False) return False
-7,435,228,998,158,217,000
Returns whether this op is impure, i.e. if its op is a placeholder or output, or if a call_function or call_module which is impure. Returns: bool: If the op is impure or not.
venv/Lib/site-packages/torch/fx/node.py
is_impure
Westlanderz/AI-Plat1
python
@compatibility(is_backward_compatible=False) def is_impure(self): '\n Returns whether this op is impure, i.e. if its op is a placeholder or\n output, or if a call_function or call_module which is impure.\n\n Returns:\n\n bool: If the op is impure or not.\n ' if (self.op in {'placeholder', 'output'}): return True if (self.op == 'call_function'): return (self.target in _side_effectful_functions) if (self.op == 'call_module'): assert (self.graph.owning_module is not None), 'self.graph.owning_module not set for purity check' target_mod = self.graph.owning_module.get_submodule(self.target) assert (target_mod is not None), f'Did not find expected submodule target {self.target}' return getattr(target_mod, '_is_impure', False) return False
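`is_impure` is what `Graph.eliminate_dead_code()` consults before dropping a node with no users; a quick sketch:

import torch
import torch.fx

def f(x):
    unused = x + 1  # pure and unused, so eligible for elimination
    return x * 2

gm = torch.fx.symbolic_trace(f)
gm.graph.eliminate_dead_code()  # keeps placeholders/outputs, impure by the definition above
gm.recompile()
print(gm.code)  # the x + 1 node is gone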
@compatibility(is_backward_compatible=False) def normalized_arguments(self, root: torch.nn.Module, arg_types: Optional[Tuple[Any]]=None, kwarg_types: Optional[Dict[(str, Any)]]=None, normalize_to_only_use_kwargs: bool=False) -> Optional[ArgsKwargsPair]: "\n Returns normalized arguments to Python targets. This means that\n `args/kwargs` will be matched up to the module/functional's\n signature and return exclusively kwargs in positional order\n if `normalize_to_only_use_kwargs` is true.\n Also populates default values. Does not support positional-only\n parameters or varargs parameters.\n\n Supports module calls.\n\n May require `arg_types` and `kwarg_types` in order to disambiguate overloads.\n\n Args:\n root (torch.nn.Module): Module upon which to resolve module targets.\n arg_types (Optional[Tuple[Any]]): Tuple of arg types for the args\n kwarg_types (Optional[Dict[str, Any]]): Dict of arg types for the kwargs\n normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.\n\n Returns:\n\n Returns NamedTuple ArgsKwargsPair, or `None` if not successful.\n " if (self.op == 'call_function'): assert callable(self.target) return normalize_function(self.target, self.args, self.kwargs, arg_types, kwarg_types) elif (self.op == 'call_module'): assert isinstance(self.target, str) return normalize_module(root, self.target, self.args, self.kwargs) return None
8,207,796,060,024,680,000
Returns normalized arguments to Python targets. This means that `args/kwargs` will be matched up to the module/functional's signature and return exclusively kwargs in positional order if `normalize_to_only_use_kwargs` is true. Also populates default values. Does not support positional-only parameters or varargs parameters. Supports module calls. May require `arg_types` and `kwarg_types` in order to disambiguate overloads. Args: root (torch.nn.Module): Module upon which to resolve module targets. arg_types (Optional[Tuple[Any]]): Tuple of arg types for the args kwarg_types (Optional[Dict[str, Any]]): Dict of arg types for the kwargs normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs. Returns: Returns NamedTuple ArgsKwargsPair, or `None` if not successful.
venv/Lib/site-packages/torch/fx/node.py
normalized_arguments
Westlanderz/AI-Plat1
python
@compatibility(is_backward_compatible=False) def normalized_arguments(self, root: torch.nn.Module, arg_types: Optional[Tuple[Any]]=None, kwarg_types: Optional[Dict[(str, Any)]]=None, normalize_to_only_use_kwargs: bool=False) -> Optional[ArgsKwargsPair]: "\n Returns normalized arguments to Python targets. This means that\n `args/kwargs` will be matched up to the module/functional's\n signature and return exclusively kwargs in positional order\n if `normalize_to_only_use_kwargs` is true.\n Also populates default values. Does not support positional-only\n parameters or varargs parameters.\n\n Supports module calls.\n\n May require `arg_types` and `kwarg_types` in order to disambiguate overloads.\n\n Args:\n root (torch.nn.Module): Module upon which to resolve module targets.\n arg_types (Optional[Tuple[Any]]): Tuple of arg types for the args\n kwarg_types (Optional[Dict[str, Any]]): Dict of arg types for the kwargs\n normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.\n\n Returns:\n\n Returns NamedTuple ArgsKwargsPair, or `None` if not successful.\n " if (self.op == 'call_function'): assert callable(self.target) return normalize_function(self.target, self.args, self.kwargs, arg_types, kwarg_types) elif (self.op == 'call_module'): assert isinstance(self.target, str) return normalize_module(root, self.target, self.args, self.kwargs) return None
@compatibility(is_backward_compatible=True) def replace_input_with(self, old_input: 'Node', new_input: 'Node'): '\n Loop through input nodes of ``self``, and replace all instances of\n ``old_input`` with ``new_input``.\n\n Args:\n\n old_input (Node): The old input node to be replaced.\n new_input (Node): The new input node to replace ``old_input``.\n ' def maybe_replace_node(n: Node) -> Node: return (new_input if (n == old_input) else n) new_args = map_arg(self.args, maybe_replace_node) new_kwargs = map_arg(self.kwargs, maybe_replace_node) assert isinstance(new_args, tuple) assert isinstance(new_kwargs, dict) self.__update_args_kwargs(new_args, new_kwargs)
-5,413,264,850,184,298,000
Loop through input nodes of ``self``, and replace all instances of ``old_input`` with ``new_input``. Args: old_input (Node): The old input node to be replaced. new_input (Node): The new input node to replace ``old_input``.
venv/Lib/site-packages/torch/fx/node.py
replace_input_with
Westlanderz/AI-Plat1
python
@compatibility(is_backward_compatible=True) def replace_input_with(self, old_input: 'Node', new_input: 'Node'): '\n Loop through input nodes of ``self``, and replace all instances of\n ``old_input`` with ``new_input``.\n\n Args:\n\n old_input (Node): The old input node to be replaced.\n new_input (Node): The new input node to replace ``old_input``.\n ' def maybe_replace_node(n: Node) -> Node: return (new_input if (n == old_input) else n) new_args = map_arg(self.args, maybe_replace_node) new_kwargs = map_arg(self.kwargs, maybe_replace_node) assert isinstance(new_args, tuple) assert isinstance(new_kwargs, dict) self.__update_args_kwargs(new_args, new_kwargs)
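`replace_input_with` is the per-user counterpart of `replace_all_uses_with`: it rewrites one node's own args/kwargs instead of rewriting every user of a node. A hedged equivalence, where `old_node` and `new_node` are hypothetical nodes in the same graph:

# Rewriting each user individually...
for user in list(old_node.users):
    user.replace_input_with(old_node, new_node)
# ...has the same effect as the single bulk call:
# old_node.replace_all_uses_with(new_node)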
def execute(argv): 'Generate genetic sequences for each benchmark' del argv FLAGS = flags.FLAGS benchmarks = IO.load_yaml(FLAGS.benchmarks_filename) if (not benchmarks): logging.error('There are no benchmarks to process') sys.exit(1) if (not os.path.isdir(FLAGS.benchmarks_directory)): logging.error('Benchmarks directory {} does not exist.'.format(FLAGS.benchmarks_directory)) sys.exit(1) try: os.makedirs(FLAGS.results_directory) except FileExistsError: pass sga = SGA(FLAGS.generations, FLAGS.population, FLAGS.cr, FLAGS.m, FLAGS.param_m, FLAGS.param_s, FLAGS.crossover, FLAGS.mutation, FLAGS.selection, FLAGS.seed, FLAGS.dimension, FLAGS.passes_filename, Goals.prepare_goals(FLAGS.goals, FLAGS.weights), 'opt', FLAGS.benchmarks_directory, FLAGS.working_set, FLAGS.times, FLAGS.tool, FLAGS.verify_output) for benchmark in tqdm(benchmarks, desc='Processing'): index = benchmark.find('.') bench_dir = benchmark[:index] bench_name = benchmark[(index + 1):] bench_dir = os.path.join(FLAGS.results_directory, bench_dir) try: os.makedirs(bench_dir) except FileExistsError: pass filename = '{}/{}.yaml'.format(bench_dir, bench_name) if (FLAGS.verify_report and os.path.isfile(filename)): continue sga.run(benchmark) if sga.results: IO.dump_yaml(sga.results, filename, FLAGS.report_only_the_best)
5,197,675,214,564,177,000
Generate genetic sequences for each benchmark
examples/algorithms/sga.py
execute
ComputerSystemsLab/OptimizationCache
python
def execute(argv): del argv FLAGS = flags.FLAGS benchmarks = IO.load_yaml(FLAGS.benchmarks_filename) if (not benchmarks): logging.error('There are no benchmarks to process') sys.exit(1) if (not os.path.isdir(FLAGS.benchmarks_directory)): logging.error('Benchmarks directory {} does not exist.'.format(FLAGS.benchmarks_directory)) sys.exit(1) try: os.makedirs(FLAGS.results_directory) except FileExistsError: pass sga = SGA(FLAGS.generations, FLAGS.population, FLAGS.cr, FLAGS.m, FLAGS.param_m, FLAGS.param_s, FLAGS.crossover, FLAGS.mutation, FLAGS.selection, FLAGS.seed, FLAGS.dimension, FLAGS.passes_filename, Goals.prepare_goals(FLAGS.goals, FLAGS.weights), 'opt', FLAGS.benchmarks_directory, FLAGS.working_set, FLAGS.times, FLAGS.tool, FLAGS.verify_output) for benchmark in tqdm(benchmarks, desc='Processing'): index = benchmark.find('.') bench_dir = benchmark[:index] bench_name = benchmark[(index + 1):] bench_dir = os.path.join(FLAGS.results_directory, bench_dir) try: os.makedirs(bench_dir) except FileExistsError: pass filename = '{}/{}.yaml'.format(bench_dir, bench_name) if (FLAGS.verify_report and os.path.isfile(filename)): continue sga.run(benchmark) if sga.results: IO.dump_yaml(sga.results, filename, FLAGS.report_only_the_best)
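The `try: os.makedirs(...) except FileExistsError: pass` blocks above are the pre-3.2 idiom; on Python 3.2+ the same effect is a single call (mirroring the script's own `FLAGS`):

import os

os.makedirs(FLAGS.results_directory, exist_ok=True)  # no try/except needed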
def main(): 'Run administrative tasks.' os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apibox.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError("Couldn't import Django. Are you sure it's installed and available on your PYTHONPATH environment variable? Did you forget to activate a virtual environment?") from exc execute_from_command_line(sys.argv)
4,307,895,462,495,865,300
Run administrative tasks.
manage.py
main
woodonggyu/apibox
python
def main(): os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apibox.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError("Couldn't import Django. Are you sure it's installed and available on your PYTHONPATH environment variable? Did you forget to activate a virtual environment?") from exc execute_from_command_line(sys.argv)
def object_to_dict(obj, list_depth=1): 'Convert Suds object into serializable format.\n\n The calling function can limit the amount of list entries that\n are converted.\n ' d = {} for (k, v) in suds.sudsobject.asdict(obj).iteritems(): if hasattr(v, '__keylist__'): d[k] = object_to_dict(v, list_depth=list_depth) elif isinstance(v, list): d[k] = [] used = 0 for item in v: used = (used + 1) if (used > list_depth): break if hasattr(item, '__keylist__'): d[k].append(object_to_dict(item, list_depth=list_depth)) else: d[k].append(item) else: d[k] = v return d
-3,044,999,984,033,977,300
Convert Suds object into serializable format. The calling function can limit the amount of list entries that are converted.
fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py
object_to_dict
Hybrid-Cloud/badam
python
def object_to_dict(obj, list_depth=1): 'Convert Suds object into serializable format.\n\n The calling function can limit the amount of list entries that\n are converted.\n ' d = {} for (k, v) in suds.sudsobject.asdict(obj).iteritems(): if hasattr(v, '__keylist__'): d[k] = object_to_dict(v, list_depth=list_depth) elif isinstance(v, list): d[k] = [] used = 0 for item in v: used = (used + 1) if (used > list_depth): break if hasattr(item, '__keylist__'): d[k].append(object_to_dict(item, list_depth=list_depth)) else: d[k].append(item) else: d[k] = v return d
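`object_to_dict` uses `dict.iteritems()`, so this file targets Python 2. A hedged Python 3 port of the same traversal; only the iteration idioms change, and `suds.sudsobject.asdict` is the same call the original makes:

import suds.sudsobject

def object_to_dict_py3(obj, list_depth=1):
    """Python 3 variant: .items() instead of .iteritems()."""
    d = {}
    for k, v in suds.sudsobject.asdict(obj).items():
        if hasattr(v, '__keylist__'):
            d[k] = object_to_dict_py3(v, list_depth=list_depth)
        elif isinstance(v, list):
            # Keep at most list_depth entries, as the original counter loop does.
            d[k] = [object_to_dict_py3(i, list_depth=list_depth)
                    if hasattr(i, '__keylist__') else i
                    for i in v[:list_depth]]
        else:
            d[k] = v
    return d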
def get_object_properties(vim, collector, mobj, type, properties): 'Gets the properties of the Managed object specified.' client_factory = vim.client.factory if (mobj is None): return None usecoll = collector if (usecoll is None): usecoll = vim.service_content.propertyCollector property_filter_spec = client_factory.create('ns0:PropertyFilterSpec') property_spec = client_factory.create('ns0:PropertySpec') property_spec.all = ((properties is None) or (len(properties) == 0)) property_spec.pathSet = properties property_spec.type = type object_spec = client_factory.create('ns0:ObjectSpec') object_spec.obj = mobj object_spec.skip = False property_filter_spec.propSet = [property_spec] property_filter_spec.objectSet = [object_spec] options = client_factory.create('ns0:RetrieveOptions') options.maxObjects = CONF.vmware.maximum_objects return vim.RetrievePropertiesEx(usecoll, specSet=[property_filter_spec], options=options)
-1,476,953,285,040,833,300
Gets the properties of the Managed object specified.
fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py
get_object_properties
Hybrid-Cloud/badam
python
def get_object_properties(vim, collector, mobj, type, properties): client_factory = vim.client.factory if (mobj is None): return None usecoll = collector if (usecoll is None): usecoll = vim.service_content.propertyCollector property_filter_spec = client_factory.create('ns0:PropertyFilterSpec') property_spec = client_factory.create('ns0:PropertySpec') property_spec.all = ((properties is None) or (len(properties) == 0)) property_spec.pathSet = properties property_spec.type = type object_spec = client_factory.create('ns0:ObjectSpec') object_spec.obj = mobj object_spec.skip = False property_filter_spec.propSet = [property_spec] property_filter_spec.objectSet = [object_spec] options = client_factory.create('ns0:RetrieveOptions') options.maxObjects = CONF.vmware.maximum_objects return vim.RetrievePropertiesEx(usecoll, specSet=[property_filter_spec], options=options)
def get_dynamic_property(vim, mobj, type, property_name): 'Gets a particular property of the Managed Object.' property_dict = get_dynamic_properties(vim, mobj, type, [property_name]) return property_dict.get(property_name)
1,675,623,835,722,089,500
Gets a particular property of the Managed Object.
fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py
get_dynamic_property
Hybrid-Cloud/badam
python
def get_dynamic_property(vim, mobj, type, property_name): property_dict = get_dynamic_properties(vim, mobj, type, [property_name]) return property_dict.get(property_name)
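Hedged usage of this convenience wrapper, with a hypothetical VM managed-object reference; `'runtime.powerState'` is a standard vSphere property path:

# vm_ref is a hypothetical ManagedObjectReference obtained elsewhere.
power_state = get_dynamic_property(vim, vm_ref, 'VirtualMachine',
                                   'runtime.powerState')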
def get_dynamic_properties(vim, mobj, type, property_names): 'Gets the specified properties of the Managed Object.' obj_content = get_object_properties(vim, None, mobj, type, property_names) if (obj_content is None): return {} if hasattr(obj_content, 'token'): cancel_retrieve(vim, obj_content.token) property_dict = {} if obj_content.objects: if hasattr(obj_content.objects[0], 'propSet'): dynamic_properties = obj_content.objects[0].propSet if dynamic_properties: for prop in dynamic_properties: property_dict[prop.name] = prop.val if hasattr(obj_content.objects[0], 'missingSet'): for m in obj_content.objects[0].missingSet: LOG.warning(_('Unable to retrieve value for %(path)s Reason: %(reason)s'), {'path': m.path, 'reason': m.fault.localizedMessage}) return property_dict
-5,690,402,733,005,168,000
Gets the specified properties of the Managed Object.
fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py
get_dynamic_properties
Hybrid-Cloud/badam
python
def get_dynamic_properties(vim, mobj, type, property_names): obj_content = get_object_properties(vim, None, mobj, type, property_names) if (obj_content is None): return {} if hasattr(obj_content, 'token'): cancel_retrieve(vim, obj_content.token) property_dict = {} if obj_content.objects: if hasattr(obj_content.objects[0], 'propSet'): dynamic_properties = obj_content.objects[0].propSet if dynamic_properties: for prop in dynamic_properties: property_dict[prop.name] = prop.val if hasattr(obj_content.objects[0], 'missingSet'): for m in obj_content.objects[0].missingSet: LOG.warning(_('Unable to retrieve value for %(path)s Reason: %(reason)s'), {'path': m.path, 'reason': m.fault.localizedMessage}) return property_dict
def get_objects(vim, type, properties_to_collect=None, all=False): 'Gets the list of objects of the type specified.' return vutil.get_objects(vim, type, CONF.vmware.maximum_objects, properties_to_collect, all)
8,382,414,298,554,375,000
Gets the list of objects of the type specified.
fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py
get_objects
Hybrid-Cloud/badam
python
def get_objects(vim, type, properties_to_collect=None, all=False): return vutil.get_objects(vim, type, CONF.vmware.maximum_objects, properties_to_collect, all)
def get_inner_objects(vim, base_obj, path, inner_type, properties_to_collect=None, all=False): 'Gets the list of inner objects of the type specified.' client_factory = vim.client.factory base_type = base_obj._type traversal_spec = vutil.build_traversal_spec(client_factory, 'inner', base_type, path, False, []) object_spec = vutil.build_object_spec(client_factory, base_obj, [traversal_spec]) property_spec = vutil.build_property_spec(client_factory, type_=inner_type, properties_to_collect=properties_to_collect, all_properties=all) property_filter_spec = vutil.build_property_filter_spec(client_factory, [property_spec], [object_spec]) options = client_factory.create('ns0:RetrieveOptions') options.maxObjects = CONF.vmware.maximum_objects return vim.RetrievePropertiesEx(vim.service_content.propertyCollector, specSet=[property_filter_spec], options=options)
3,725,338,895,588,128,000
Gets the list of inner objects of the type specified.
fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py
get_inner_objects
Hybrid-Cloud/badam
python
def get_inner_objects(vim, base_obj, path, inner_type, properties_to_collect=None, all=False): client_factory = vim.client.factory base_type = base_obj._type traversal_spec = vutil.build_traversal_spec(client_factory, 'inner', base_type, path, False, []) object_spec = vutil.build_object_spec(client_factory, base_obj, [traversal_spec]) property_spec = vutil.build_property_spec(client_factory, type_=inner_type, properties_to_collect=properties_to_collect, all_properties=all) property_filter_spec = vutil.build_property_filter_spec(client_factory, [property_spec], [object_spec]) options = client_factory.create('ns0:RetrieveOptions') options.maxObjects = CONF.vmware.maximum_objects return vim.RetrievePropertiesEx(vim.service_content.propertyCollector, specSet=[property_filter_spec], options=options)
def cancel_retrieve(vim, token): 'Cancels the retrieve operation.' return vim.CancelRetrievePropertiesEx(vim.service_content.propertyCollector, token=token)
-7,733,806,174,974,513,000
Cancels the retrieve operation.
fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py
cancel_retrieve
Hybrid-Cloud/badam
python
def cancel_retrieve(vim, token): return vim.CancelRetrievePropertiesEx(vim.service_content.propertyCollector, token=token)
def continue_to_get_objects(vim, token): 'Continues to get the list of objects of the type specified.' return vim.ContinueRetrievePropertiesEx(vim.service_content.propertyCollector, token=token)
-4,253,159,307,787,871,000
Continues to get the list of objects of the type specified.
fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py
continue_to_get_objects
Hybrid-Cloud/badam
python
def continue_to_get_objects(vim, token): return vim.ContinueRetrievePropertiesEx(vim.service_content.propertyCollector, token=token)
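`get_objects`, `continue_to_get_objects`, and `cancel_retrieve` together support paged retrieval: `RetrievePropertiesEx` returns at most `maxObjects` results plus a continuation token. A hedged sketch of the consuming loop, with `vim` assumed to be an authenticated session:

def iterate_all_objects(vim, type_, properties):
    # Drain every page of a RetrievePropertiesEx result set.
    result = get_objects(vim, type_, properties)
    while result is not None:
        for obj in getattr(result, 'objects', []):
            yield obj
        token = getattr(result, 'token', None)
        if not token:
            return  # last page
        result = continue_to_get_objects(vim, token)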
def get_prop_spec(client_factory, spec_type, properties): 'Builds the Property Spec Object.' prop_spec = client_factory.create('ns0:PropertySpec') prop_spec.type = spec_type prop_spec.pathSet = properties return prop_spec
-2,410,048,619,987,145,000
Builds the Property Spec Object.
fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py
get_prop_spec
Hybrid-Cloud/badam
python
def get_prop_spec(client_factory, spec_type, properties): prop_spec = client_factory.create('ns0:PropertySpec') prop_spec.type = spec_type prop_spec.pathSet = properties return prop_spec
def get_obj_spec(client_factory, obj, select_set=None): 'Builds the Object Spec object.' obj_spec = client_factory.create('ns0:ObjectSpec') obj_spec.obj = obj obj_spec.skip = False if (select_set is not None): obj_spec.selectSet = select_set return obj_spec
3,381,832,873,053,010,000
Builds the Object Spec object.
fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py
get_obj_spec
Hybrid-Cloud/badam
python
def get_obj_spec(client_factory, obj, select_set=None): obj_spec = client_factory.create('ns0:ObjectSpec') obj_spec.obj = obj obj_spec.skip = False if (select_set is not None): obj_spec.selectSet = select_set return obj_spec
def get_prop_filter_spec(client_factory, obj_spec, prop_spec): 'Builds the Property Filter Spec Object.' prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec') prop_filter_spec.propSet = prop_spec prop_filter_spec.objectSet = obj_spec return prop_filter_spec
2,964,159,198,052,121,600
Builds the Property Filter Spec Object.
fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py
get_prop_filter_spec
Hybrid-Cloud/badam
python
def get_prop_filter_spec(client_factory, obj_spec, prop_spec): prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec') prop_filter_spec.propSet = prop_spec prop_filter_spec.objectSet = obj_spec return prop_filter_spec
def get_properties_for_a_collection_of_objects(vim, type, obj_list, properties): 'Gets the list of properties for the collection of\n objects of the type specified.\n ' client_factory = vim.client.factory if (len(obj_list) == 0): return [] prop_spec = get_prop_spec(client_factory, type, properties) lst_obj_specs = [] for obj in obj_list: lst_obj_specs.append(get_obj_spec(client_factory, obj)) prop_filter_spec = get_prop_filter_spec(client_factory, lst_obj_specs, [prop_spec]) options = client_factory.create('ns0:RetrieveOptions') options.maxObjects = CONF.vmware.maximum_objects return vim.RetrievePropertiesEx(vim.service_content.propertyCollector, specSet=[prop_filter_spec], options=options)
-3,855,717,937,075,148,000
Gets the list of properties for the collection of objects of the type specified.
fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py
get_properties_for_a_collection_of_objects
Hybrid-Cloud/badam
python
def get_properties_for_a_collection_of_objects(vim, type, obj_list, properties): 'Gets the list of properties for the collection of\n objects of the type specified.\n ' client_factory = vim.client.factory if (len(obj_list) == 0): return [] prop_spec = get_prop_spec(client_factory, type, properties) lst_obj_specs = [] for obj in obj_list: lst_obj_specs.append(get_obj_spec(client_factory, obj)) prop_filter_spec = get_prop_filter_spec(client_factory, lst_obj_specs, [prop_spec]) options = client_factory.create('ns0:RetrieveOptions') options.maxObjects = CONF.vmware.maximum_objects return vim.RetrievePropertiesEx(vim.service_content.propertyCollector, specSet=[prop_filter_spec], options=options)
def get_about_info(vim): 'Get the About Info from the service content.' return vim.service_content.about
-3,403,545,440,397,797,000
Get the About Info from the service content.
fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py
get_about_info
Hybrid-Cloud/badam
python
def get_about_info(vim): return vim.service_content.about
def validate_view_arguments(project_name=None, ontology_key=None, document_type=None): '\n extends the "validate_view_arguments" fn in "views_base"\n by adding a check that there is a default customization associated w/ this project/ontology/proxy\n :param project_name:\n :param ontology_key:\n :param document_type:\n :return:\n ' model_customization = None (validity, project, ontology, model_proxy, msg) = validate_view_arguments_base(project_name=project_name, ontology_key=ontology_key, document_type=document_type) if (not validity): return (validity, project, ontology, model_proxy, model_customization, msg) try: model_customization = QModelCustomization.objects.get(project=project, proxy=model_proxy, is_default=True) except ObjectDoesNotExist: msg = _("There is no default customization associated with this document type for this project.<br/>Please <a href='mailto:{0}?subject=Missing%20Customization&body=Please%20create%20a%20customization%20for%20the%20%22{1}%22%20document%20type.'>contact</a> the project administrator for assistance.").format(project.email, model_proxy.fully_qualified_name) validity = False return (validity, project, ontology, model_proxy, model_customization, msg) return (validity, project, ontology, model_proxy, model_customization, msg)
7,049,328,265,247,111,000
extends the "validate_view_arguments" fn in "views_base" by adding a check that there is a default customization associated w/ this project/ontology/proxy :param project_name: :param ontology_key: :param document_type: :return:
Q/questionnaire/views/views_realizations.py
validate_view_arguments
ES-DOC/esdoc-questionnaire
python
def validate_view_arguments(project_name=None, ontology_key=None, document_type=None): '\n extends the "validate_view_arguments" fn in "views_base"\n by adding a check that there is a default customization associated w/ this project/ontology/proxy\n :param project_name:\n :param ontology_key:\n :param document_type:\n :return:\n ' model_customization = None (validity, project, ontology, model_proxy, msg) = validate_view_arguments_base(project_name=project_name, ontology_key=ontology_key, document_type=document_type) if (not validity): return (validity, project, ontology, model_proxy, model_customization, msg) try: model_customization = QModelCustomization.objects.get(project=project, proxy=model_proxy, is_default=True) except ObjectDoesNotExist: msg = _("There is no default customization associated with this document type for this project.<br/>Please <a href='mailto:{0}?subject=Missing%20Customization&body=Please%20create%20a%20customization%20for%20the%20%22{1}%22%20document%20type.'>contact</a> the project administrator for assistance.").format(project.email, model_proxy.fully_qualified_name) validity = False return (validity, project, ontology, model_proxy, model_customization, msg) return (validity, project, ontology, model_proxy, model_customization, msg)
@redirect_legacy_projects def q_view_new(request, project_name=None, ontology_key=None, document_type=None): '\n this is never exposed by templates\n but a user might still try to navigate explicitly to this URL\n just return an error telling them not to try that\n :param request:\n :param project_name:\n :param ontology_key:\n :param document_type:\n :return:\n ' context = add_parameters_to_context(request) (validity, project, ontology, model_proxy, model_customization, msg) = validate_view_arguments(project_name=project_name, ontology_key=ontology_key, document_type=document_type) if (not validity): return q_error(request, msg) msg = 'The ES-DOC Questionnaire only supports viewing of <em>existing</em> documents.' return q_error(request, msg)
-2,959,830,592,501,942,300
this is never exposed by templates but a user might still try to navigate explicitly to this URL just return an error telling them not to try that :param request: :param project_name: :param ontology_key: :param document_type: :return:
Q/questionnaire/views/views_realizations.py
q_view_new
ES-DOC/esdoc-questionnaire
python
@redirect_legacy_projects def q_view_new(request, project_name=None, ontology_key=None, document_type=None): '\n this is never exposed by templates\n but a user might still try to navigate explicitly to this URL\n just return an error telling them not to try that\n :param request:\n :param project_name:\n :param ontology_key:\n :param document_type:\n :return:\n ' context = add_parameters_to_context(request) (validity, project, ontology, model_proxy, model_customization, msg) = validate_view_arguments(project_name=project_name, ontology_key=ontology_key, document_type=document_type) if (not validity): return q_error(request, msg) msg = 'The ES-DOC Questionnaire only supports viewing of <em>existing</em> documents.' return q_error(request, msg)
@redirect_legacy_projects def q_view_existing(request, project_name=None, ontology_key=None, document_type=None, realization_pk=None): '\n this is exactly the same as "q_edit_existing" except:\n there are no authentication checks,\n the template_context & template are different.\n :param request:\n :param project_name:\n :param ontology_key:\n :param document_type:\n :param realization_pk:\n :return:\n ' context = add_parameters_to_context(request) (validity, project, ontology, model_proxy, model_customization, msg) = validate_view_arguments(project_name=project_name, ontology_key=ontology_key, document_type=document_type) if (not validity): return q_error(request, msg) try: session_key = get_key_from_request(request) cached_realizations_key = '{0}_realizations'.format(session_key) model_realization = get_or_create_cached_object(request.session, cached_realizations_key, get_existing_realizations, **{'project': project, 'ontology': ontology, 'model_proxy': model_proxy, 'model_id': realization_pk}) except ObjectDoesNotExist: msg = "Cannot find a document with an id of '{0}' for that project/ontology/document type combination.".format(realization_pk) return q_error(request, msg) view_url_dirname = request.path.rsplit('/', 1)[0] api_url_dirname = reverse('realization-detail', kwargs={'pk': model_realization.pk}).rsplit('/', 2)[0] template_context = {'project': project, 'ontology': ontology, 'proxy': model_proxy, 'view_url_dirname': view_url_dirname, 'api_url_dirname': api_url_dirname, 'session_key': session_key, 'customization': model_customization, 'realization': model_realization, 'read_only': 'true'} return render_to_response('questionnaire/q_view.html', template_context, context_instance=context)
7,441,352,269,681,091,000
this is exactly the same as "q_edit_existing" except: there are no authentication checks, the template_context & template are different. :param request: :param project_name: :param ontology_key: :param document_type: :param realization_pk: :return:
Q/questionnaire/views/views_realizations.py
q_view_existing
ES-DOC/esdoc-questionnaire
python
@redirect_legacy_projects def q_view_existing(request, project_name=None, ontology_key=None, document_type=None, realization_pk=None): '\n this is exactly the same as "q_edit_existing" except:\n there are no authentication checks,\n the template_context & template are different.\n :param request:\n :param project_name:\n :param ontology_key:\n :param document_type:\n :param realization_pk:\n :return:\n ' context = add_parameters_to_context(request) (validity, project, ontology, model_proxy, model_customization, msg) = validate_view_arguments(project_name=project_name, ontology_key=ontology_key, document_type=document_type) if (not validity): return q_error(request, msg) try: session_key = get_key_from_request(request) cached_realizations_key = '{0}_realizations'.format(session_key) model_realization = get_or_create_cached_object(request.session, cached_realizations_key, get_existing_realizations, **{'project': project, 'ontology': ontology, 'model_proxy': model_proxy, 'model_id': realization_pk}) except ObjectDoesNotExist: msg = "Cannot find a document with an id of '{0}' for that project/ontology/document type combination.".format(realization_pk) return q_error(request, msg) view_url_dirname = request.path.rsplit('/', 1)[0] api_url_dirname = reverse('realization-detail', kwargs={'pk': model_realization.pk}).rsplit('/', 2)[0] template_context = {'project': project, 'ontology': ontology, 'proxy': model_proxy, 'view_url_dirname': view_url_dirname, 'api_url_dirname': api_url_dirname, 'session_key': session_key, 'customization': model_customization, 'realization': model_realization, 'read_only': 'true'} return render_to_response('questionnaire/q_view.html', template_context, context_instance=context)
@redirect_legacy_projects def q_get_existing(request, project_name=None, ontology_key=None, document_type=None): '\n this is meant to be used from external requests (ie: further_info_url)\n where uniquely identifying model fields (including pk) are passed\n if a unique realization cannot be found then an error is returned\n otherwise the response is routed to "q_edit_existing"\n :param request:\n :param project_name:\n :param ontology_key:\n :param document_type:\n :param realization_pk:\n :return:\n ' (validity, project, ontology, model_proxy, model_customization, msg) = validate_view_arguments(project_name=project_name, ontology_key=ontology_key, document_type=document_type) if (not validity): return q_error(request, msg) model_realizations = QModelRealization.objects.filter(project=project, proxy=model_proxy) additional_parameters = request.GET.copy() for (key, value) in additional_parameters.iteritems(): if ((key == 'pk') or (key == 'guid')): try: return HttpResponseRedirect(reverse('edit_existing', kwargs={'project_name': project_name, 'ontology_key': ontology_key, 'document_type': document_type, 'realization_pk': model_realizations.get(**{key: value}).pk})) except (ObjectDoesNotExist, ValueError): msg = "There is no '{0}' document with a {1} of '{2}'".format(model_proxy, key, value) return q_error(request, msg) else: try: property_proxy = model_proxy.property_proxies.get(name=key) if (property_proxy.field_type == 'ATOMIC'): model_realizations = model_realizations.filter(properties__proxy=property_proxy).has_atomic_value(value) elif (property_proxy.field_type == 'ENUMERATION'): formatted_values = [fv for fv in map((lambda v: v.strip()), value.split(',')) if fv] model_realizations = model_realizations.filter(properties__proxy=property_proxy).has_enumeration_values(formatted_values) else: msg = 'Unable to support getting a document by relationship_field' return q_error(request, msg) except ObjectDoesNotExist: msg = "There is no '{0}' property for the '{1}' document_type".format(key, model_proxy) return q_error(request, msg) if (model_realizations.count() != 1): msg = "Unable to uniquely identify '{0}' document_type with the following properties: '{1}'".format(model_proxy, ', '.join(['{0}: {1}'.format(p[0], p[1]) for p in additional_parameters.items()])) return q_error(request, msg) return HttpResponseRedirect(reverse('edit_existing', kwargs={'project_name': project_name, 'ontology_key': ontology_key, 'document_type': document_type, 'realization_pk': model_realizations.first().pk}))
-1,087,697,355,295,826,700
this is meant to be used from external requests (ie: further_info_url) where uniquely identifying model fields (including pk) are passed if a unique realization cannot be found then an error is returned otherwise the response is routed to "q_edit_existing" :param request: :param project_name: :param ontology_key: :param document_type: :param realization_pk: :return:
Q/questionnaire/views/views_realizations.py
q_get_existing
ES-DOC/esdoc-questionnaire
python
@redirect_legacy_projects def q_get_existing(request, project_name=None, ontology_key=None, document_type=None): '\n this is meant to be used from external requests (ie: further_info_url)\n where uniquely identifying model fields (including pk) are passed\n if a unique realization cannot be found then an error is returned\n otherwise the response is routed to "q_edit_existing"\n :param request:\n :param project_name:\n :param ontology_key:\n :param document_type:\n :param realization_pk:\n :return:\n ' (validity, project, ontology, model_proxy, model_customization, msg) = validate_view_arguments(project_name=project_name, ontology_key=ontology_key, document_type=document_type) if (not validity): return q_error(request, msg) model_realizations = QModelRealization.objects.filter(project=project, proxy=model_proxy) additional_parameters = request.GET.copy() for (key, value) in additional_parameters.iteritems(): if ((key == 'pk') or (key == 'guid')): try: return HttpResponseRedirect(reverse('edit_existing', kwargs={'project_name': project_name, 'ontology_key': ontology_key, 'document_type': document_type, 'realization_pk': model_realizations.get(**{key: value}).pk})) except (ObjectDoesNotExist, ValueError): msg = "There is no '{0}' document with a {1} of '{2}'".format(model_proxy, key, value) return q_error(request, msg) else: try: property_proxy = model_proxy.property_proxies.get(name=key) if (property_proxy.field_type == 'ATOMIC'): model_realizations = model_realizations.filter(properties__proxy=property_proxy).has_atomic_value(value) elif (property_proxy.field_type == 'ENUMERATION'): formatted_values = [fv for fv in map((lambda v: v.strip()), value.split(',')) if fv] model_realizations = model_realizations.filter(properties__proxy=property_proxy).has_enumeration_values(formatted_values) else: msg = 'Unable to support getting a document by relationship_field' return q_error(request, msg) except ObjectDoesNotExist: msg = "There is no '{0}' property for the '{1}' document_type".format(key, model_proxy) return q_error(request, msg) if (model_realizations.count() != 1): msg = "Unable to uniquely identify '{0}' document_type with the following properties: '{1}'".format(model_proxy, ', '.join(['{0}: {1}'.format(p[0], p[1]) for p in additional_parameters.items()])) return q_error(request, msg) return HttpResponseRedirect(reverse('edit_existing', kwargs={'project_name': project_name, 'ontology_key': ontology_key, 'document_type': document_type, 'realization_pk': model_realizations.first().pk}))
def combineCommandLineOptionsDictIntoShellCommand(commandOptions): '\n Write out the compas input parameters into a shell string.\n Ensure the Compas executable is first, and not repeated.\n Options are non-ordered.\n ' shellCommand = commandOptions['compas_executable'] del commandOptions['compas_executable'] for (key, val) in commandOptions.items(): shellCommand += (((' ' + key) + ' ') + val) return shellCommand
-6,617,363,741,634,693,000
Write out the compas input parameters into a shell string. Ensure the Compas executable is first, and not repeated. Options are non-ordered.
utils/example_plots/methods_paper_plots/fig_5_HR_diagram/pythonSubmit.py
combineCommandLineOptionsDictIntoShellCommand
IsobelMarguarethe/COMPAS
python
def combineCommandLineOptionsDictIntoShellCommand(commandOptions): '\n Write out the compas input parameters into a shell string.\n Ensure the Compas executable is first, and not repeated.\n Options are non-ordered.\n ' shellCommand = commandOptions['compas_executable'] del commandOptions['compas_executable'] for (key, val) in commandOptions.items(): shellCommand += (((' ' + key) + ' ') + val) return shellCommand
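A hedged usage sketch with hypothetical COMPAS flags. Note the function deletes `'compas_executable'` from the dict it is handed, so pass a copy if the options are reused; on Python 3.7+ the remaining flags keep insertion order.

options = {
    'compas_executable': './COMPAS',  # must be present; emitted first
    '--number-of-systems': '1000',    # hypothetical flag/value pairs
    '--random-seed': '0',
}
print(combineCommandLineOptionsDictIntoShellCommand(dict(options)))
# ./COMPAS --number-of-systems 1000 --random-seed 0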
def cleanStringParameter(str_param): ' clean up string parameters to avoid confusing Boost ' if (str_param is not None): str_param = str_param.strip('\'"') escapes = [' ', "'", '"'] for escape in escapes: str_param = re.sub('(?<!\\\\){}'.format(escape), '\\{}'.format(escape), str_param) return str_param
-4,032,103,620,565,185,500
clean up string parameters to avoid confusing Boost
utils/example_plots/methods_paper_plots/fig_5_HR_diagram/pythonSubmit.py
cleanStringParameter
IsobelMarguarethe/COMPAS
python
def cleanStringParameter(str_param): if (str_param is not None): str_param = str_param.strip('\'"') escapes = [' ', "'", '"'] for escape in escapes: str_param = re.sub('(?<!\\\\){}'.format(escape), '\\{}'.format(escape), str_param) return str_param
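The escaping behaviour of `cleanStringParameter`: surrounding quotes are stripped, then any remaining space, single quote, or double quote not already preceded by a backslash gets one. For example:

print(cleanStringParameter("'my output dir'"))  # my\ output\ dir
print(cleanStringParameter("it's"))             # it\'s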
def generateCommandLineOptionsDict(self): '\n This function generates a dictionary mapping COMPAS options to their specified \n values (or empty strings for boolean options). These can be combined into a string\n and run directly as a terminal command, or passed to the stroopwafel interface\n where some of them may be overwritten. Options not to be included in the command \n line should be set to Python's None (except booleans, which should be set to False)\n\n Parameters\n -----------\n self : pythonProgramOptions\n Contains program options\n\n Returns\n --------\n commands : str or list of strs\n ' booleanChoices = self.booleanChoices() booleanCommands = self.booleanCommands() nBoolean = len(booleanChoices) assert (len(booleanCommands) == nBoolean) numericalChoices = self.numericalChoices() numericalCommands = self.numericalCommands() nNumerical = len(numericalChoices) assert (len(numericalCommands) == nNumerical) stringChoices = self.stringChoices() stringCommands = self.stringCommands() nString = len(stringChoices) assert (len(stringCommands) == nString) listChoices = self.listChoices() listCommands = self.listCommands() nList = len(listChoices) assert (len(listCommands) == nList) command = {'compas_executable': self.compas_executable} for i in range(nBoolean): if (booleanChoices[i] == True): command.update({booleanCommands[i]: ''}) elif (booleanChoices[i] == False): command.update({booleanCommands[i]: 'False'}) for i in range(nNumerical): if (not (numericalChoices[i] == None)): command.update({numericalCommands[i]: str(numericalChoices[i])}) for i in range(nString): if (not (stringChoices[i] == None)): command.update({stringCommands[i]: cleanStringParameter(stringChoices[i])}) for i in range(nList): if listChoices[i]: command.update({listCommands[i]: ' '.join(map(str, listChoices[i]))}) return command
400,721,765,994,395,800
This function generates a dictionary mapping COMPAS options to their specified values (or empty strings for boolean options). These can be combined into a string and run directly as a terminal command, or passed to the stroopwafel interface where some of them may be overwritten. Options not to be included in the command line should be set to Python's None (except booleans, which should be set to False) Parameters ----------- self : pythonProgramOptions Contains program options Returns -------- commands : str or list of strs
utils/example_plots/methods_paper_plots/fig_5_HR_diagram/pythonSubmit.py
generateCommandLineOptionsDict
IsobelMarguarethe/COMPAS
python
def generateCommandLineOptionsDict(self): '\n This function generates a dictionary mapping COMPAS options to their specified \n values (or empty strings for boolean options). These can be combined into a string\n and run directly as a terminal command, or passed to the stroopwafel interface\n where some of them may be overwritten. Options not to be included in the command \n line should be set to Python's None (except booleans, which should be set to False)\n\n Parameters\n -----------\n self : pythonProgramOptions\n Contains program options\n\n Returns\n --------\n commands : str or list of strs\n ' booleanChoices = self.booleanChoices() booleanCommands = self.booleanCommands() nBoolean = len(booleanChoices) assert (len(booleanCommands) == nBoolean) numericalChoices = self.numericalChoices() numericalCommands = self.numericalCommands() nNumerical = len(numericalChoices) assert (len(numericalCommands) == nNumerical) stringChoices = self.stringChoices() stringCommands = self.stringCommands() nString = len(stringChoices) assert (len(stringCommands) == nString) listChoices = self.listChoices() listCommands = self.listCommands() nList = len(listChoices) assert (len(listCommands) == nList) command = {'compas_executable': self.compas_executable} for i in range(nBoolean): if (booleanChoices[i] == True): command.update({booleanCommands[i]: ''}) elif (booleanChoices[i] == False): command.update({booleanCommands[i]: 'False'}) for i in range(nNumerical): if (not (numericalChoices[i] == None)): command.update({numericalCommands[i]: str(numericalChoices[i])}) for i in range(nString): if (not (stringChoices[i] == None)): command.update({stringCommands[i]: cleanStringParameter(stringChoices[i])}) for i in range(nList): if listChoices[i]: command.update({listCommands[i]: ' '.join(map(str, listChoices[i]))}) return command
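How the two submit-script helpers compose, sketched with a hypothetical `pythonProgramOptions` instance:

import subprocess

program_options = pythonProgramOptions()  # hypothetical construction
command_dict = program_options.generateCommandLineOptionsDict()
shell_command = combineCommandLineOptionsDictIntoShellCommand(command_dict)
subprocess.run(shell_command, shell=True, check=True)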
def get_header(self, request): '\n Extracts the header containing the JSON web token from the given\n request.\n ' header = request.META.get(api_settings.AUTH_HEADER_NAME) if isinstance(header, str): header = header.encode(HTTP_HEADER_ENCODING) return header
1,066,136,120,163,030,000
Extracts the header containing the JSON web token from the given request.
webpersonal/env/Lib/site-packages/rest_framework_simplejwt/authentication.py
get_header
BrianMarquez3/Python-Django
python
def get_header(self, request): '\n Extracts the header containing the JSON web token from the given\n request.\n ' header = request.META.get(api_settings.AUTH_HEADER_NAME) if isinstance(header, str): header = header.encode(HTTP_HEADER_ENCODING) return header
def get_raw_token(self, header): '\n Extracts an unvalidated JSON web token from the given "Authorization"\n header value.\n ' parts = header.split() if (len(parts) == 0): return None if (parts[0] not in AUTH_HEADER_TYPE_BYTES): return None if (len(parts) != 2): raise AuthenticationFailed(_('Authorization header must contain two space-delimited values'), code='bad_authorization_header') return parts[1]
4,438,926,674,893,976,000
Extracts an unvalidated JSON web token from the given "Authorization" header value.
webpersonal/env/Lib/site-packages/rest_framework_simplejwt/authentication.py
get_raw_token
BrianMarquez3/Python-Django
python
def get_raw_token(self, header): '\n Extracts an unvalidated JSON web token from the given "Authorization"\n header value.\n ' parts = header.split() if (len(parts) == 0): return None if (parts[0] not in AUTH_HEADER_TYPE_BYTES): return None if (len(parts) != 2): raise AuthenticationFailed(_('Authorization header must contain two space-delimited values'), code='bad_authorization_header') return parts[1]
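The parsing rules in `get_raw_token`, illustrated with the library's default `AUTH_HEADER_TYPES = ('Bearer',)`, so that `AUTH_HEADER_TYPE_BYTES` contains `b'Bearer'`:

header = b'Bearer abc.def.ghi'
parts = header.split()
assert parts == [b'Bearer', b'abc.def.ghi']  # -> returns b'abc.def.ghi'
# b'Basic dXNlcg=='  -> None (unrecognized scheme, other authenticators may run)
# b'Bearer'          -> AuthenticationFailed (must be two space-delimited parts)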
def get_validated_token(self, raw_token): '\n Validates an encoded JSON web token and returns a validated token\n wrapper object.\n ' messages = [] for AuthToken in api_settings.AUTH_TOKEN_CLASSES: try: return AuthToken(raw_token) except TokenError as e: messages.append({'token_class': AuthToken.__name__, 'token_type': AuthToken.token_type, 'message': e.args[0]}) raise InvalidToken({'detail': _('Given token not valid for any token type'), 'messages': messages})
-2,122,270,758,756,231,000
Validates an encoded JSON web token and returns a validated token wrapper object.
webpersonal/env/Lib/site-packages/rest_framework_simplejwt/authentication.py
get_validated_token
BrianMarquez3/Python-Django
python
def get_validated_token(self, raw_token): '\n Validates an encoded JSON web token and returns a validated token\n wrapper object.\n ' messages = [] for AuthToken in api_settings.AUTH_TOKEN_CLASSES: try: return AuthToken(raw_token) except TokenError as e: messages.append({'token_class': AuthToken.__name__, 'token_type': AuthToken.token_type, 'message': e.args[0]}) raise InvalidToken({'detail': _('Given token not valid for any token type'), 'messages': messages})
def get_user(self, validated_token): '\n Attempts to find and return a user using the given validated token.\n ' try: user_id = validated_token[api_settings.USER_ID_CLAIM] except KeyError: raise InvalidToken(_('Token contained no recognizable user identification')) try: user = self.user_model.objects.get(**{api_settings.USER_ID_FIELD: user_id}) except self.user_model.DoesNotExist: raise AuthenticationFailed(_('User not found'), code='user_not_found') if (not user.is_active): raise AuthenticationFailed(_('User is inactive'), code='user_inactive') return user
8,316,481,509,880,417,000
Attempts to find and return a user using the given validated token.
webpersonal/env/Lib/site-packages/rest_framework_simplejwt/authentication.py
get_user
BrianMarquez3/Python-Django
python
def get_user(self, validated_token): '\n \n ' try: user_id = validated_token[api_settings.USER_ID_CLAIM] except KeyError: raise InvalidToken(_('Token contained no recognizable user identification')) try: user = self.user_model.objects.get(**{api_settings.USER_ID_FIELD: user_id}) except self.user_model.DoesNotExist: raise AuthenticationFailed(_('User not found'), code='user_not_found') if (not user.is_active): raise AuthenticationFailed(_('User is inactive'), code='user_inactive') return user
def get_user(self, validated_token): '\n Returns a stateless user object which is backed by the given validated\n token.\n ' if (api_settings.USER_ID_CLAIM not in validated_token): raise InvalidToken(_('Token contained no recognizable user identification')) return api_settings.TOKEN_USER_CLASS(validated_token)
1,161,550,880,086,617,600
Returns a stateless user object which is backed by the given validated token.
webpersonal/env/Lib/site-packages/rest_framework_simplejwt/authentication.py
get_user
BrianMarquez3/Python-Django
python
def get_user(self, validated_token): '\n Returns a stateless user object which is backed by the given validated\n token.\n ' if (api_settings.USER_ID_CLAIM not in validated_token): raise InvalidToken(_('Token contained no recognizable user identification')) return api_settings.TOKEN_USER_CLASS(validated_token)
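Taken together, the methods above form the request-processing pipeline of simplejwt's JWTAuthentication. A hedged sketch of how a single request flows through them, assuming a DRF request object carrying an "Authorization: Bearer <token>" header:

    # Sketch only; `request` is assumed to be a DRF request with a Bearer header.
    from rest_framework_simplejwt.authentication import JWTAuthentication

    auth = JWTAuthentication()
    header = auth.get_header(request)              # b'Bearer <token>' or None
    if header is not None:
        raw_token = auth.get_raw_token(header)     # b'<token>' or None
        if raw_token is not None:
            # Raises InvalidToken if no configured token class accepts it.
            validated = auth.get_validated_token(raw_token)
            # Raises AuthenticationFailed for missing or inactive users.
            user = auth.get_user(validated)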
def _init_state(self, encoder_hidden): ' Initialize the encoder hidden state. ' if (encoder_hidden is None): return None if isinstance(encoder_hidden, tuple): encoder_hidden = tuple([self._cat_directions(h) for h in encoder_hidden]) else: encoder_hidden = self._cat_directions(encoder_hidden) return encoder_hidden
-888,232,364,086,648,400
Initialize the encoder hidden state.
seq2seq/models/DecoderRNN.py
_init_state
junyongk/pytorch-seq2seq
python
def _init_state(self, encoder_hidden): ' ' if (encoder_hidden is None): return None if isinstance(encoder_hidden, tuple): encoder_hidden = tuple([self._cat_directions(h) for h in encoder_hidden]) else: encoder_hidden = self._cat_directions(encoder_hidden) return encoder_hidden
def _cat_directions(self, h): ' If the encoder is bidirectional, do the following transformation.\n (#directions * #layers, #batch, hidden_size) -> (#layers, #batch, #directions * hidden_size)\n ' if self.bidirectional_encoder: h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2) return h
1,430,779,244,302,292,700
If the encoder is bidirectional, do the following transformation. (#directions * #layers, #batch, hidden_size) -> (#layers, #batch, #directions * hidden_size)
seq2seq/models/DecoderRNN.py
_cat_directions
junyongk/pytorch-seq2seq
python
def _cat_directions(self, h): ' If the encoder is bidirectional, do the following transformation.\n (#directions * #layers, #batch, hidden_size) -> (#layers, #batch, #directions * hidden_size)\n ' if self.bidirectional_encoder: h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2) return h
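A small standalone demonstration of the interleaved concatenation performed by _cat_directions, assuming PyTorch is installed:

    import torch

    # Hidden state of a 2-layer bidirectional encoder:
    # (#directions * #layers, #batch, hidden_size) = (4, 3, 5).
    h = torch.randn(4, 3, 5)

    # Rows 0 and 2 are the forward directions of layers 0 and 1;
    # rows 1 and 3 are the matching backward directions.
    merged = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)
    print(merged.shape)  # torch.Size([2, 3, 10])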
def parse(self) -> object: ' reads in the file, then parses record tables' self.contents = self.f.read() self.header = self.parseHeader() self.records = self.parseRecordInfoList() self.readRecord0()
3,804,246,371,784,762,400
reads in the file, then parses record tables
dbookbee/mobi/__init__.py
parse
cloudylan/dbooklib
python
def parse(self) -> object: ' ' self.contents = self.f.read() self.header = self.parseHeader() self.records = self.parseRecordInfoList() self.readRecord0()
def author(self): 'Returns the author of the book' return self.config['exth']['records'][100]
5,083,589,889,736,570,000
Returns the author of the book
dbookbee/mobi/__init__.py
author
cloudylan/dbooklib
python
def author(self): return self.config['exth']['records'][100]
def title(self): 'Returns the title of the book' return self.config['mobi']['Full Name']
943,860,609,546,996,200
Returns the title of the book
dbookbee/mobi/__init__.py
title
cloudylan/dbooklib
python
def title(self): return self.config['mobi']['Full Name']
async def begin_delete(self, resource_group_name: str, network_security_group_name: str, security_rule_name: str, **kwargs) -> AsyncLROPoller[None]: 'Deletes the specified network security rule.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param network_security_group_name: The name of the network security group.\n :type network_security_group_name: str\n :param security_rule_name: The name of the security rule.\n :type security_rule_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[None]\n :raises ~azure.core.exceptions.HttpResponseError:\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._delete_initial(resource_group_name=resource_group_name, network_security_group_name=network_security_group_name, security_rule_name=security_rule_name, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if (polling is True): polling_method = AsyncARMPolling(lro_delay, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
-2,860,460,665,875,554,300
Deletes the specified network security rule. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param network_security_group_name: The name of the network security group. :type network_security_group_name: str :param security_rule_name: The name of the security rule. :type security_rule_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError:
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_security_rules_operations.py
begin_delete
Co0olboi/azure-sdk-for-python
python
async def begin_delete(self, resource_group_name: str, network_security_group_name: str, security_rule_name: str, **kwargs) -> AsyncLROPoller[None]: 'Deletes the specified network security rule.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param network_security_group_name: The name of the network security group.\n :type network_security_group_name: str\n :param security_rule_name: The name of the security rule.\n :type security_rule_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[None]\n :raises ~azure.core.exceptions.HttpResponseError:\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._delete_initial(resource_group_name=resource_group_name, network_security_group_name=network_security_group_name, security_rule_name=security_rule_name, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if (polling is True): polling_method = AsyncARMPolling(lro_delay, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
async def get(self, resource_group_name: str, network_security_group_name: str, security_rule_name: str, **kwargs) -> 'models.SecurityRule': 'Get the specified network security rule.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param network_security_group_name: The name of the network security group.\n :type network_security_group_name: str\n :param security_rule_name: The name of the security rule.\n :type security_rule_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: SecurityRule, or the result of cls(response)\n :rtype: ~azure.mgmt.network.v2018_07_01.models.SecurityRule\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2018-07-01' accept = 'application/json' url = self.get.metadata['url'] path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'networkSecurityGroupName': self._serialize.url('network_security_group_name', network_security_group_name, 'str'), 'securityRuleName': self._serialize.url('security_rule_name', security_rule_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('SecurityRule', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
-431,310,436,554,868,800
Get the specified network security rule. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param network_security_group_name: The name of the network security group. :type network_security_group_name: str :param security_rule_name: The name of the security rule. :type security_rule_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: SecurityRule, or the result of cls(response) :rtype: ~azure.mgmt.network.v2018_07_01.models.SecurityRule :raises: ~azure.core.exceptions.HttpResponseError
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_security_rules_operations.py
get
Co0olboi/azure-sdk-for-python
python
async def get(self, resource_group_name: str, network_security_group_name: str, security_rule_name: str, **kwargs) -> 'models.SecurityRule': 'Get the specified network security rule.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param network_security_group_name: The name of the network security group.\n :type network_security_group_name: str\n :param security_rule_name: The name of the security rule.\n :type security_rule_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: SecurityRule, or the result of cls(response)\n :rtype: ~azure.mgmt.network.v2018_07_01.models.SecurityRule\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2018-07-01' accept = 'application/json' url = self.get.metadata['url'] path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'networkSecurityGroupName': self._serialize.url('network_security_group_name', network_security_group_name, 'str'), 'securityRuleName': self._serialize.url('security_rule_name', security_rule_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('SecurityRule', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
async def begin_create_or_update(self, resource_group_name: str, network_security_group_name: str, security_rule_name: str, security_rule_parameters: 'models.SecurityRule', **kwargs) -> AsyncLROPoller['models.SecurityRule']: 'Creates or updates a security rule in the specified network security group.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param network_security_group_name: The name of the network security group.\n :type network_security_group_name: str\n :param security_rule_name: The name of the security rule.\n :type security_rule_name: str\n :param security_rule_parameters: Parameters supplied to the create or update network security\n rule operation.\n :type security_rule_parameters: ~azure.mgmt.network.v2018_07_01.models.SecurityRule\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either SecurityRule or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.SecurityRule]\n :raises ~azure.core.exceptions.HttpResponseError:\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._create_or_update_initial(resource_group_name=resource_group_name, network_security_group_name=network_security_group_name, security_rule_name=security_rule_name, security_rule_parameters=security_rule_parameters, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('SecurityRule', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if (polling is True): polling_method = AsyncARMPolling(lro_delay, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
-7,602,824,364,702,028,000
Creates or updates a security rule in the specified network security group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param network_security_group_name: The name of the network security group. :type network_security_group_name: str :param security_rule_name: The name of the security rule. :type security_rule_name: str :param security_rule_parameters: Parameters supplied to the create or update network security rule operation. :type security_rule_parameters: ~azure.mgmt.network.v2018_07_01.models.SecurityRule :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either SecurityRule or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.SecurityRule] :raises ~azure.core.exceptions.HttpResponseError:
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_security_rules_operations.py
begin_create_or_update
Co0olboi/azure-sdk-for-python
python
async def begin_create_or_update(self, resource_group_name: str, network_security_group_name: str, security_rule_name: str, security_rule_parameters: 'models.SecurityRule', **kwargs) -> AsyncLROPoller['models.SecurityRule']: 'Creates or updates a security rule in the specified network security group.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param network_security_group_name: The name of the network security group.\n :type network_security_group_name: str\n :param security_rule_name: The name of the security rule.\n :type security_rule_name: str\n :param security_rule_parameters: Parameters supplied to the create or update network security\n rule operation.\n :type security_rule_parameters: ~azure.mgmt.network.v2018_07_01.models.SecurityRule\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either SecurityRule or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.SecurityRule]\n :raises ~azure.core.exceptions.HttpResponseError:\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._create_or_update_initial(resource_group_name=resource_group_name, network_security_group_name=network_security_group_name, security_rule_name=security_rule_name, security_rule_parameters=security_rule_parameters, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('SecurityRule', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if (polling is True): polling_method = AsyncARMPolling(lro_delay, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
def list(self, resource_group_name: str, network_security_group_name: str, **kwargs) -> AsyncIterable['models.SecurityRuleListResult']: 'Gets all security rules in a network security group.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param network_security_group_name: The name of the network security group.\n :type network_security_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either SecurityRuleListResult or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.SecurityRuleListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2018-07-01' accept = 'application/json' def prepare_request(next_link=None): header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') if (not next_link): url = self.list.metadata['url'] path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'networkSecurityGroupName': self._serialize.url('network_security_group_name', network_security_group_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('SecurityRuleListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return ((deserialized.next_link or None), AsyncList(list_of_elem)) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged(get_next, extract_data)
-2,965,344,165,562,121,700
Gets all security rules in a network security group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param network_security_group_name: The name of the network security group. :type network_security_group_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either SecurityRuleListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.SecurityRuleListResult] :raises: ~azure.core.exceptions.HttpResponseError
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_security_rules_operations.py
list
Co0olboi/azure-sdk-for-python
python
def list(self, resource_group_name: str, network_security_group_name: str, **kwargs) -> AsyncIterable['models.SecurityRuleListResult']: 'Gets all security rules in a network security group.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param network_security_group_name: The name of the network security group.\n :type network_security_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either SecurityRuleListResult or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.SecurityRuleListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2018-07-01' accept = 'application/json' def prepare_request(next_link=None): header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') if (not next_link): url = self.list.metadata['url'] path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'networkSecurityGroupName': self._serialize.url('network_security_group_name', network_security_group_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('SecurityRuleListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return ((deserialized.next_link or None), AsyncList(list_of_elem)) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged(get_next, extract_data)
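A hedged usage sketch tying the four SecurityRulesOperations methods together through the async NetworkManagementClient. The resource names are placeholders and rule_parameters is an assumed SecurityRule model instance:

    # Illustrative sketch only; names and credentials are placeholders.
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.aio import NetworkManagementClient

    async def manage_rule(subscription_id, rule_parameters):
        async with DefaultAzureCredential() as credential:
            async with NetworkManagementClient(credential, subscription_id) as client:
                # Long-running create/update: awaiting begin_* yields an AsyncLROPoller.
                poller = await client.security_rules.begin_create_or_update(
                    'my-rg', 'my-nsg', 'allow-ssh', rule_parameters)
                rule = await poller.result()

                # Point read and paged listing.
                rule = await client.security_rules.get('my-rg', 'my-nsg', 'allow-ssh')
                async for r in client.security_rules.list('my-rg', 'my-nsg'):
                    print(r.name)

                # Long-running delete.
                delete_poller = await client.security_rules.begin_delete(
                    'my-rg', 'my-nsg', 'allow-ssh')
                await delete_poller.result()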
@classmethod def file_to_delete(cls, all_files=False): '\n Return the list of file to delete.\n ' directory = '{0}{1}'.format(PyFunceble.OUTPUT_DIRECTORY, PyFunceble.OUTPUTS.parent_directory) if (not directory.endswith(directory_separator)): directory += directory_separator result = [] for (root, _, files) in walk(directory): for file in files: if (file in ['.gitignore', '.keep']): continue if ((not all_files) and ('logs' in root) and ('.log' in file)): continue if root.endswith(directory_separator): result.append((root + file)) else: result.append(((root + directory_separator) + file)) return result
-7,448,717,146,557,944,000
Return the list of file to delete.
PyFunceble/output/clean.py
file_to_delete
NeolithEra/PyFunceble
python
@classmethod def file_to_delete(cls, all_files=False): '\n \n ' directory = '{0}{1}'.format(PyFunceble.OUTPUT_DIRECTORY, PyFunceble.OUTPUTS.parent_directory) if (not directory.endswith(directory_separator)): directory += directory_separator result = [] for (root, _, files) in walk(directory): for file in files: if (file in ['.gitignore', '.keep']): continue if ((not all_files) and ('logs' in root) and ('.log' in file)): continue if root.endswith(directory_separator): result.append((root + file)) else: result.append(((root + directory_separator) + file)) return result
@classmethod def databases_to_delete(cls): '\n Set the databases files to delete.\n ' result = [] if (PyFunceble.CONFIGURATION.db_type == 'json'): directory = PyFunceble.CONFIG_DIRECTORY result.append('{0}{1}'.format(directory, PyFunceble.OUTPUTS.default_files.dir_structure)) result.append('{0}{1}'.format(directory, PyFunceble.OUTPUTS.default_files.iana)) result.append('{0}{1}'.format(directory, PyFunceble.OUTPUTS.default_files.public_suffix)) result.append('{0}{1}'.format(directory, PyFunceble.OUTPUTS.default_files.inactive_db)) result.append('{0}{1}'.format(directory, PyFunceble.OUTPUTS.default_files.mining)) return result
2,857,564,988,682,521,600
Set the databases files to delete.
PyFunceble/output/clean.py
databases_to_delete
NeolithEra/PyFunceble
python
@classmethod def databases_to_delete(cls): '\n \n ' result = [] if (PyFunceble.CONFIGURATION.db_type == 'json'): directory = PyFunceble.CONFIG_DIRECTORY result.append('{0}{1}'.format(directory, PyFunceble.OUTPUTS.default_files.dir_structure)) result.append('{0}{1}'.format(directory, PyFunceble.OUTPUTS.default_files.iana)) result.append('{0}{1}'.format(directory, PyFunceble.OUTPUTS.default_files.public_suffix)) result.append('{0}{1}'.format(directory, PyFunceble.OUTPUTS.default_files.inactive_db)) result.append('{0}{1}'.format(directory, PyFunceble.OUTPUTS.default_files.mining)) return result
def almost_everything(self, clean_all=False, file_path=False): '\n Delete almost all discovered files.\n\n :param bool clean_all:\n Tell the subsystem if we have to clean everything instesd\n of almost everything.\n ' if (('do_not_clean' not in PyFunceble.INTERN) or (not PyFunceble.INTERN['do_not_clean'])): to_delete = self.file_to_delete(clean_all) if ((not PyFunceble.abstracts.Version.is_local_cloned()) and clean_all): to_delete.extend(self.databases_to_delete()) for file in to_delete: PyFunceble.helpers.File(file).delete() PyFunceble.LOGGER.info(f'Deleted: {file}') if clean_all: to_avoid = ['whois'] else: to_avoid = ['whois', 'auto_continue', 'inactive', 'mining'] if (not file_path): query = 'DELETE FROM {0}' else: query = 'DELETE FROM {0} WHERE file_path = %(file_path)s' if (PyFunceble.CONFIGURATION.db_type in ['mariadb', 'mysql']): with PyFunceble.engine.MySQL() as connection: for database_name in [y for (x, y) in PyFunceble.engine.MySQL.tables.items() if (x not in to_avoid)]: lquery = query.format(database_name) with connection.cursor() as cursor: cursor.execute(lquery, {'file_path': file_path}) PyFunceble.LOGGER.info(f'Cleaned the data related to {repr(file_path)} from the {database_name} table.') if ((not PyFunceble.abstracts.Version.is_local_cloned()) and clean_all): PyFunceble.load_config() PyFunceble.LOGGER.info(f'Reloaded configuration.')
-2,799,624,771,840,950,000
Delete almost all discovered files. :param bool clean_all: Tell the subsystem if we have to clean everything instead of almost everything.
PyFunceble/output/clean.py
almost_everything
NeolithEra/PyFunceble
python
def almost_everything(self, clean_all=False, file_path=False): '\n Delete almost all discovered files.\n\n :param bool clean_all:\n Tell the subsystem if we have to clean everything instead\n of almost everything.\n ' if (('do_not_clean' not in PyFunceble.INTERN) or (not PyFunceble.INTERN['do_not_clean'])): to_delete = self.file_to_delete(clean_all) if ((not PyFunceble.abstracts.Version.is_local_cloned()) and clean_all): to_delete.extend(self.databases_to_delete()) for file in to_delete: PyFunceble.helpers.File(file).delete() PyFunceble.LOGGER.info(f'Deleted: {file}') if clean_all: to_avoid = ['whois'] else: to_avoid = ['whois', 'auto_continue', 'inactive', 'mining'] if (not file_path): query = 'DELETE FROM {0}' else: query = 'DELETE FROM {0} WHERE file_path = %(file_path)s' if (PyFunceble.CONFIGURATION.db_type in ['mariadb', 'mysql']): with PyFunceble.engine.MySQL() as connection: for database_name in [y for (x, y) in PyFunceble.engine.MySQL.tables.items() if (x not in to_avoid)]: lquery = query.format(database_name) with connection.cursor() as cursor: cursor.execute(lquery, {'file_path': file_path}) PyFunceble.LOGGER.info(f'Cleaned the data related to {repr(file_path)} from the {database_name} table.') if ((not PyFunceble.abstracts.Version.is_local_cloned()) and clean_all): PyFunceble.load_config() PyFunceble.LOGGER.info(f'Reloaded configuration.')
async def get_thumb(self, message): '\n Тупой алгоритм,\n который рекурсивно с конца ищет поле "thumb"\n и если находит, возвращает его\n ' if isinstance(message, list): values = list(enumerate(message)) elif isinstance(message, dict): values = list(message.items()) else: return values.reverse() for (k, v) in values: if (k == 'reply_to_message'): continue if isinstance(v, dict): if ('thumb' in v): return v['thumb'] if (result := (await self.get_thumb(v))): return result
1,209,851,858,328,211,700
A dumb algorithm that recursively searches from the end for the "thumb" field and returns it if found
tgquote/filegetters/base.py
get_thumb
Forevka/tgquote
python
async def get_thumb(self, message): '\n A dumb algorithm\n that recursively searches from the end for the "thumb" field\n and returns it if found\n ' if isinstance(message, list): values = list(enumerate(message)) elif isinstance(message, dict): values = list(message.items()) else: return values.reverse() for (k, v) in values: if (k == 'reply_to_message'): continue if isinstance(v, dict): if ('thumb' in v): return v['thumb'] if (result := (await self.get_thumb(v))): return result
async def get_document(self, message): '\n Тупой алгоритм,\n который рекурсивно с конца ищет поле "file_id"\n и если находит, возвращает его родителя\n ' if isinstance(message, list): values = list(enumerate(message)) elif isinstance(message, dict): values = list(message.items()) else: return values.reverse() for (k, v) in values: if (k == 'reply_to_message'): continue if isinstance(v, dict): if ('file_id' in v): return v if (result := (await self.get_document(v))): return result
1,139,342,667,563,544,700
A dumb algorithm that recursively searches from the end for the "file_id" field and returns its parent if found
tgquote/filegetters/base.py
get_document
Forevka/tgquote
python
async def get_document(self, message): '\n A dumb algorithm\n that recursively searches from the end for the "file_id" field\n and returns its parent if found\n ' if isinstance(message, list): values = list(enumerate(message)) elif isinstance(message, dict): values = list(message.items()) else: return values.reverse() for (k, v) in values: if (k == 'reply_to_message'): continue if isinstance(v, dict): if ('file_id' in v): return v if (result := (await self.get_document(v))): return result
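For illustration, the same reverse recursive field search as a standalone synchronous function (the originals are async methods on a file-getter class); find_field and the sample message are hypothetical:

    # Standalone sketch of the reverse recursive field search used above.
    def find_field(message, field):
        if isinstance(message, list):
            values = list(enumerate(message))
        elif isinstance(message, dict):
            values = list(message.items())
        else:
            return None
        values.reverse()
        for k, v in values:
            if k == 'reply_to_message':
                continue
            if isinstance(v, dict):
                if field in v:
                    return v[field]
                if (result := find_field(v, field)) is not None:
                    return result
        return None

    msg = {'photo': {'thumb': {'file_id': 'abc'}}, 'text': 'hi'}
    print(find_field(msg, 'thumb'))  # {'file_id': 'abc'}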
def gradient_penalty_loss(self, y_true, y_pred, averaged_samples): '\n Computes gradient penalty based on prediction and weighted real / fake samples\n ' gradients = K.gradients(y_pred, averaged_samples)[0] gradients_sqr = K.square(gradients) gradients_sqr_sum = K.sum(gradients_sqr, axis=np.arange(1, len(gradients_sqr.shape))) gradient_l2_norm = K.sqrt(gradients_sqr_sum) gradient_penalty = K.square((1 - gradient_l2_norm)) return K.mean(gradient_penalty)
2,655,758,970,492,790,300
Computes gradient penalty based on prediction and weighted real / fake samples
wgan_gp/wgan_gp.py
gradient_penalty_loss
311nguyenbaohuy/Keras-GAN
python
def gradient_penalty_loss(self, y_true, y_pred, averaged_samples): '\n \n ' gradients = K.gradients(y_pred, averaged_samples)[0] gradients_sqr = K.square(gradients) gradients_sqr_sum = K.sum(gradients_sqr, axis=np.arange(1, len(gradients_sqr.shape))) gradient_l2_norm = K.sqrt(gradients_sqr_sum) gradient_penalty = K.square((1 - gradient_l2_norm)) return K.mean(gradient_penalty)
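The loss implements the gradient penalty from the WGAN-GP paper, E[(||grad D(x_hat)||_2 - 1)^2], taken over interpolated samples. A NumPy sketch of the same arithmetic on toy precomputed gradients, for illustration:

    import numpy as np

    # Toy critic gradients w.r.t. 4 interpolated samples of shape (8, 8, 1).
    gradients = np.random.randn(4, 8, 8, 1)

    # L2 norm per sample, reducing over every axis except the batch axis.
    grad_norm = np.sqrt(np.sum(np.square(gradients), axis=(1, 2, 3)))

    # Penalize deviation of the norm from 1, averaged over the batch.
    gradient_penalty = np.mean(np.square(1.0 - grad_norm))
    print(gradient_penalty)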
def find_subsections(section: Element) -> List[nodes.section]: 'Return a list of subsections for the given ``section``.' result = [] for child in section: if isinstance(child, nodes.section): result.append(child) continue elif isinstance(child, nodes.Element): result.extend(find_subsections(child)) return result
7,239,203,639,831,353,000
Return a list of subsections for the given ``section``.
sphinx/writers/texinfo.py
find_subsections
Bibo-Joshi/sphinx
python
def find_subsections(section: Element) -> List[nodes.section]: result = [] for child in section: if isinstance(child, nodes.section): result.append(child) continue elif isinstance(child, nodes.Element): result.extend(find_subsections(child)) return result
def smart_capwords(s: str, sep: str=None) -> str: 'Like string.capwords() but does not capitalize words that already\n contain a capital letter.' words = s.split(sep) for (i, word) in enumerate(words): if all((x.islower() for x in word)): words[i] = word.capitalize() return (sep or ' ').join(words)
8,820,783,055,573,011,000
Like string.capwords() but does not capitalize words that already contain a capital letter.
sphinx/writers/texinfo.py
smart_capwords
Bibo-Joshi/sphinx
python
def smart_capwords(s: str, sep: str=None) -> str: 'Like string.capwords() but does not capitalize words that already\n contain a capital letter.' words = s.split(sep) for (i, word) in enumerate(words): if all((x.islower() for x in word)): words[i] = word.capitalize() return (sep or ' ').join(words)
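A quick demonstration of how this differs from string.capwords; the import path reflects where the function is defined (sphinx/writers/texinfo.py), assuming Sphinx is installed:

    from string import capwords
    from sphinx.writers.texinfo import smart_capwords

    print(capwords('using the GNU toolchain'))        # Using The Gnu Toolchain
    print(smart_capwords('using the GNU toolchain'))  # Using The GNU Toolchain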
def collect_node_names(self) -> None: 'Generates a unique id for each section.\n\n Assigns the attribute ``node_name`` to each section.' def add_node_name(name: str) -> str: node_id = self.escape_id(name) (nth, suffix) = (1, '') while (((node_id + suffix) in self.written_ids) or ((node_id + suffix) in self.node_names)): nth += 1 suffix = ('<%s>' % nth) node_id += suffix self.written_ids.add(node_id) self.node_names[node_id] = name return node_id self.document['node_name'] = 'Top' add_node_name('Top') add_node_name('top') self.indices = [(add_node_name(name), content) for (name, content) in self.indices] for section in self.document.findall(nodes.section): title = cast(nodes.TextElement, section.next_node(nodes.Titular)) name = (title.astext() if title else '<untitled>') section['node_name'] = add_node_name(name)
3,862,376,790,812,649,000
Generates a unique id for each section. Assigns the attribute ``node_name`` to each section.
sphinx/writers/texinfo.py
collect_node_names
Bibo-Joshi/sphinx
python
def collect_node_names(self) -> None: 'Generates a unique id for each section.\n\n Assigns the attribute ``node_name`` to each section.' def add_node_name(name: str) -> str: node_id = self.escape_id(name) (nth, suffix) = (1, '') while (((node_id + suffix) in self.written_ids) or ((node_id + suffix) in self.node_names)): nth += 1 suffix = ('<%s>' % nth) node_id += suffix self.written_ids.add(node_id) self.node_names[node_id] = name return node_id self.document['node_name'] = 'Top' add_node_name('Top') add_node_name('top') self.indices = [(add_node_name(name), content) for (name, content) in self.indices] for section in self.document.findall(nodes.section): title = cast(nodes.TextElement, section.next_node(nodes.Titular)) name = (title.astext() if title else '<untitled>') section['node_name'] = add_node_name(name)
def collect_node_menus(self) -> None: 'Collect the menu entries for each "node" section.' node_menus = self.node_menus targets: List[Element] = [self.document] targets.extend(self.document.findall(nodes.section)) for node in targets: assert (('node_name' in node) and node['node_name']) entries = [s['node_name'] for s in find_subsections(node)] node_menus[node['node_name']] = entries title = self.document.next_node(nodes.title) top = (title.parent if title else self.document) if (not isinstance(top, (nodes.document, nodes.section))): top = self.document if (top is not self.document): entries = node_menus[top['node_name']] entries += node_menus['Top'][1:] node_menus['Top'] = entries del node_menus[top['node_name']] top['node_name'] = 'Top' for (name, _content) in self.indices: node_menus[name] = [] node_menus['Top'].append(name)
-9,153,002,201,023,103,000
Collect the menu entries for each "node" section.
sphinx/writers/texinfo.py
collect_node_menus
Bibo-Joshi/sphinx
python
def collect_node_menus(self) -> None: node_menus = self.node_menus targets: List[Element] = [self.document] targets.extend(self.document.findall(nodes.section)) for node in targets: assert (('node_name' in node) and node['node_name']) entries = [s['node_name'] for s in find_subsections(node)] node_menus[node['node_name']] = entries title = self.document.next_node(nodes.title) top = (title.parent if title else self.document) if (not isinstance(top, (nodes.document, nodes.section))): top = self.document if (top is not self.document): entries = node_menus[top['node_name']] entries += node_menus['Top'][1:] node_menus['Top'] = entries del node_menus[top['node_name']] top['node_name'] = 'Top' for (name, _content) in self.indices: node_menus[name] = [] node_menus['Top'].append(name)
def collect_rellinks(self) -> None: 'Collect the relative links (next, previous, up) for each "node".' rellinks = self.rellinks node_menus = self.node_menus for id in node_menus: rellinks[id] = ['', '', ''] for (id, entries) in node_menus.items(): for e in entries: rellinks[e][2] = id for (id, entries) in node_menus.items(): for (i, id) in enumerate(entries): if (i != 0): rellinks[id][1] = entries[(i - 1)] if (i != (len(entries) - 1)): rellinks[id][0] = entries[(i + 1)] try: first = node_menus['Top'][0] except IndexError: pass else: rellinks['Top'][0] = first rellinks[first][1] = 'Top'
9,184,016,339,000,884,000
Collect the relative links (next, previous, up) for each "node".
sphinx/writers/texinfo.py
collect_rellinks
Bibo-Joshi/sphinx
python
def collect_rellinks(self) -> None: rellinks = self.rellinks node_menus = self.node_menus for id in node_menus: rellinks[id] = ['', '', ''] for (id, entries) in node_menus.items(): for e in entries: rellinks[e][2] = id for (id, entries) in node_menus.items(): for (i, id) in enumerate(entries): if (i != 0): rellinks[id][1] = entries[(i - 1)] if (i != (len(entries) - 1)): rellinks[id][0] = entries[(i + 1)] try: first = node_menus['Top'][0] except IndexError: pass else: rellinks['Top'][0] = first rellinks[first][1] = 'Top'
def escape(self, s: str) -> str: 'Return a string with Texinfo command characters escaped.' s = s.replace('@', '@@') s = s.replace('{', '@{') s = s.replace('}', '@}') s = s.replace('``', '`@w{`}') s = s.replace("''", "'@w{'}") return s
9,155,230,778,005,700,000
Return a string with Texinfo command characters escaped.
sphinx/writers/texinfo.py
escape
Bibo-Joshi/sphinx
python
def escape(self, s: str) -> str: s = s.replace('@', '@@') s = s.replace('{', '@{') s = s.replace('}', '@}') s = s.replace('``', '`@w{`}') s = s.replace("''", "'@w{'}") return s
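For illustration, the same escaping as a standalone sketch with a sample input covering each special character (the original is a TexinfoTranslator method):

    # Standalone sketch of TexinfoTranslator.escape.
    def texinfo_escape(s: str) -> str:
        s = s.replace('@', '@@')       # @ introduces Texinfo commands
        s = s.replace('{', '@{')
        s = s.replace('}', '@}')
        s = s.replace('``', '`@w{`}')  # keep quote pairs from being converted
        s = s.replace("''", "'@w{'}")
        return s

    print(texinfo_escape("email@example.org {braces} ``quoted''"))
    # email@@example.org @{braces@} `@w{`}quoted'@w{'}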
def escape_arg(self, s: str) -> str: 'Return an escaped string suitable for use as an argument\n to a Texinfo command.' s = self.escape(s) s = s.replace(',', '@comma{}') s = ' '.join(s.split()).strip() return s
6,421,078,286,981,946,000
Return an escaped string suitable for use as an argument to a Texinfo command.
sphinx/writers/texinfo.py
escape_arg
Bibo-Joshi/sphinx
python
def escape_arg(self, s: str) -> str: 'Return an escaped string suitable for use as an argument\n to a Texinfo command.' s = self.escape(s) s = s.replace(',', '@comma{}') s = ' '.join(s.split()).strip() return s
def escape_id(self, s: str) -> str: 'Return an escaped string suitable for node names and anchors.' bad_chars = ',:()' for bc in bad_chars: s = s.replace(bc, ' ') if re.search('[^ .]', s): s = s.replace('.', ' ') s = ' '.join(s.split()).strip() return self.escape(s)
2,745,504,349,594,490,400
Return an escaped string suitable for node names and anchors.
sphinx/writers/texinfo.py
escape_id
Bibo-Joshi/sphinx
python
def escape_id(self, s: str) -> str: bad_chars = ',:()' for bc in bad_chars: s = s.replace(bc, ' ') if re.search('[^ .]', s): s = s.replace('.', ' ') s = ' '.join(s.split()).strip() return self.escape(s)
def escape_menu(self, s: str) -> str: 'Return an escaped string suitable for menu entries.' s = self.escape_arg(s) s = s.replace(':', ';') s = ' '.join(s.split()).strip() return s
-2,540,185,727,114,880,500
Return an escaped string suitable for menu entries.
sphinx/writers/texinfo.py
escape_menu
Bibo-Joshi/sphinx
python
def escape_menu(self, s: str) -> str: s = self.escape_arg(s) s = s.replace(':', ';') s = ' '.join(s.split()).strip() return s
def ensure_eol(self) -> None: 'Ensure the last line in body is terminated by new line.' if (self.body and (self.body[(- 1)][(- 1):] != '\n')): self.body.append('\n')
-8,811,531,292,864,465,000
Ensure the last line in body is terminated by new line.
sphinx/writers/texinfo.py
ensure_eol
Bibo-Joshi/sphinx
python
def ensure_eol(self) -> None: if (self.body and (self.body[(- 1)][(- 1):] != '\n')): self.body.append('\n')
def get_short_id(self, id: str) -> str: "Return a shorter 'id' associated with ``id``." try: sid = self.short_ids[id] except KeyError: sid = hex(len(self.short_ids))[2:] self.short_ids[id] = sid return sid
-8,816,042,497,587,074,000
Return a shorter 'id' associated with ``id``.
sphinx/writers/texinfo.py
get_short_id
Bibo-Joshi/sphinx
python
def get_short_id(self, id: str) -> str: try: sid = self.short_ids[id] except KeyError: sid = hex(len(self.short_ids))[2:] self.short_ids[id] = sid return sid
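The short ids are simply hexadecimal counters cached per input. A minimal standalone sketch of the same scheme, for illustration:

    # Standalone sketch of the short-id cache behind get_short_id.
    short_ids = {}

    def get_short_id(id: str) -> str:
        try:
            return short_ids[id]
        except KeyError:
            sid = hex(len(short_ids))[2:]  # '0', '1', ..., 'a', 'b', ...
            short_ids[id] = sid
            return sid

    print(get_short_id('index'))   # 0
    print(get_short_id('search'))  # 1
    print(get_short_id('index'))   # 0 again, served from the cache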
def load_data(coh, thresh=False): 'Load in the hg38 and hg19 gistic thresholded data. Assume GISTIC runs \n for each tumor type live in a parent directory (hg38_gistic or hg19_gistic)\n one level up from this script.' if thresh: hg38 = (('../hg38_gistic/' + coh) + '/all_thresholded.by_genes.txt') hg19 = (('../hg19_gistic/' + coh) + '/all_thresholded.by_genes.txt') hg38drops = ['Cytoband', 'Locus ID'] else: hg38 = (('../hg38_gistic/' + coh) + '/all_data_by_genes.txt') hg19 = (('../hg19_gistic/' + coh) + '/all_data_by_genes.txt') hg38drops = ['Cytoband', 'Gene ID'] df_hg19 = pd.read_table(hg19, index_col=[0]).drop(['Cytoband', 'Locus ID'], axis=1) df_hg38 = pd.read_table(hg38, index_col=[0]).drop(hg38drops, axis=1) same_samps = list((set(df_hg38.columns) & set(df_hg19.columns))) same_genes = list((set(df_hg38.index) & set(df_hg19.index))) print(coh, len(same_genes), len(same_samps)) return (df_hg38[same_samps].T[same_genes], df_hg19[same_samps].T[same_genes]) return (df_hg38, df_hg19)
8,639,613,017,847,819,000
Load in the hg38 and hg19 gistic thresholded data. Assume GISTIC runs for each tumor type live in a parent directory (hg38_gistic or hg19_gistic) one level up from this script.
scripts/AnalysisCode.py
load_data
gaog94/GDAN_QC_CopyNumber
python
def load_data(coh, thresh=False): 'Load in the hg38 and hg19 gistic thresholded data. Assume GISTIC runs \n for each tumor type live in a parent directory (hg38_gistic or hg19_gistic)\n one level up from this script.' if thresh: hg38 = (('../hg38_gistic/' + coh) + '/all_thresholded.by_genes.txt') hg19 = (('../hg19_gistic/' + coh) + '/all_thresholded.by_genes.txt') hg38drops = ['Cytoband', 'Locus ID'] else: hg38 = (('../hg38_gistic/' + coh) + '/all_data_by_genes.txt') hg19 = (('../hg19_gistic/' + coh) + '/all_data_by_genes.txt') hg38drops = ['Cytoband', 'Gene ID'] df_hg19 = pd.read_table(hg19, index_col=[0]).drop(['Cytoband', 'Locus ID'], axis=1) df_hg38 = pd.read_table(hg38, index_col=[0]).drop(hg38drops, axis=1) same_samps = list((set(df_hg38.columns) & set(df_hg19.columns))) same_genes = list((set(df_hg38.index) & set(df_hg19.index))) print(coh, len(same_genes), len(same_samps)) return (df_hg38[same_samps].T[same_genes], df_hg19[same_samps].T[same_genes]) return (df_hg38, df_hg19)
def raw_value_comparison(coh, plot=False): "Return the average differences in raw copy number values between the\n gene-level calls in hg19 and hg38 for each gene for a given tumor type \n 'coh.' If plot=True, plot the genes' differences in a histogram." (df_38, df_19) = load_data(coh, thresh=False) df_s = (df_38 - df_19) avg_diff = {g: np.average(df_s[g]) for g in df_s.columns.get_level_values('Gene Symbol')} results = [] std = np.std([avg_diff[x] for x in avg_diff]) for g in avg_diff: if (avg_diff[g] > (4 * std)): results.append([coh, 'Pos', g, avg_diff[g]]) elif (avg_diff[g] < ((- 4) * std)): results.append([coh, 'Neg', g, avg_diff[g]]) if plot: plt.hist([avg_diff[x] for x in avg_diff], bins=1000) plt.title(coh, fontsize=16) plt.xlabel('Average CN Difference Between Alignments', fontsize=14) plt.ylabel('Genes', fontsize=14) sns.despine() plt.savefig((('./genehists/' + coh) + '_genehist.pdf')) plt.savefig((('./genehists/' + coh) + '_genehist.png')) plt.clf() return results
7,161,945,418,783,088,000
Return the average differences in raw copy number values between the gene-level calls in hg19 and hg38 for each gene for a given tumor type 'coh.' If plot=True, plot the genes' differences in a histogram.
scripts/AnalysisCode.py
raw_value_comparison
gaog94/GDAN_QC_CopyNumber
python
def raw_value_comparison(coh, plot=False): "Return the average differences in raw copy number values between the\n gene-level calls in hg19 and hg38 for each gene for a given tumor type \n 'coh.' If plot=True, plot the genes' differences in a histogram." (df_38, df_19) = load_data(coh, thresh=False) df_s = (df_38 - df_19) avg_diff = {g: np.average(df_s[g]) for g in df_s.columns.get_level_values('Gene Symbol')} results = [] std = np.std([avg_diff[x] for x in avg_diff]) for g in avg_diff: if (avg_diff[g] > (4 * std)): results.append([coh, 'Pos', g, avg_diff[g]]) elif (avg_diff[g] < ((- 4) * std)): results.append([coh, 'Neg', g, avg_diff[g]]) if plot: plt.hist([avg_diff[x] for x in avg_diff], bins=1000) plt.title(coh, fontsize=16) plt.xlabel('Average CN Difference Between Alignments', fontsize=14) plt.ylabel('Genes', fontsize=14) sns.despine() plt.savefig((('./genehists/' + coh) + '_genehist.pdf')) plt.savefig((('./genehists/' + coh) + '_genehist.png')) plt.clf() return results
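The flagging rule is a four-standard-deviation cutoff on the per-gene mean differences. A worked NumPy illustration with synthetic values (gene names are made up):

    import numpy as np

    rng = np.random.default_rng(0)

    # Toy per-gene average hg38-minus-hg19 differences: noise near zero,
    # plus one gene with a large systematic shift.
    avg_diff = {f'GENE{i}': d for i, d in enumerate(rng.normal(0.0, 0.05, 99))}
    avg_diff['OUTLIER'] = 2.0

    std = np.std(list(avg_diff.values()))
    flagged = [(g, round(d, 3)) for g, d in avg_diff.items() if abs(d) > 4 * std]
    print(flagged)  # [('OUTLIER', 2.0)]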
def sequential_cohort_test_raw_values(cohs, plot=False): 'Sequentially compare raw gene-level calls for the given tumor types.' c_results = [] for coh in cohs: c_results += raw_value_comparison(coh, plot=plot) df_r = pd.DataFrame(c_results, columns=['Cohort', 'Direction', 'Gene', 'Difference']) gcount = Counter(df_r['Gene']) pos_gcount = Counter(df_r[(df_r['Direction'] == 'Pos')]['Gene']) neg_gcount = Counter(df_r[(df_r['Direction'] == 'Neg')]['Gene']) df = pd.DataFrame([gcount[x] for x in gcount], index=gcount.keys(), columns=['Count']) df['Count_pos'] = [(pos_gcount[x] if (x in pos_gcount) else 0) for x in gcount] df['Count_neg'] = [(neg_gcount[x] if (x in neg_gcount) else 0) for x in gcount] if plot: plt.plot(np.sort([gcount[x] for x in gcount])[::(- 1)], 'b-') plt.xlabel('Gene by Rank', fontsize=16) plt.ylabel('Number of Occurences', fontsize=16) sns.despine() plt.savefig('GeneDevianceDropoff.pdf') plt.savefig('GeneDevianceDropoff.png') df_r.to_csv('./genehists/LargestDifferences.tsv', sep='\t', index=False) df.to_csv('./genehists/LargestDifferenceGenes_ByCount.tsv', sep='\t', index=True)
3,576,139,177,949,071,400
Sequentially compare raw gene-level calls for the given tumor types.
scripts/AnalysisCode.py
sequential_cohort_test_raw_values
gaog94/GDAN_QC_CopyNumber
python
def sequential_cohort_test_raw_values(cohs, plot=False): c_results = [] for coh in cohs: c_results += raw_value_comparison(coh, plot=plot) df_r = pd.DataFrame(c_results, columns=['Cohort', 'Direction', 'Gene', 'Difference']) gcount = Counter(df_r['Gene']) pos_gcount = Counter(df_r[(df_r['Direction'] == 'Pos')]['Gene']) neg_gcount = Counter(df_r[(df_r['Direction'] == 'Neg')]['Gene']) df = pd.DataFrame([gcount[x] for x in gcount], index=gcount.keys(), columns=['Count']) df['Count_pos'] = [(pos_gcount[x] if (x in pos_gcount) else 0) for x in gcount] df['Count_neg'] = [(neg_gcount[x] if (x in neg_gcount) else 0) for x in gcount] if plot: plt.plot(np.sort([gcount[x] for x in gcount])[::(- 1)], 'b-') plt.xlabel('Gene by Rank', fontsize=16) plt.ylabel('Number of Occurences', fontsize=16) sns.despine() plt.savefig('GeneDevianceDropoff.pdf') plt.savefig('GeneDevianceDropoff.png') df_r.to_csv('./genehists/LargestDifferences.tsv', sep='\t', index=False) df.to_csv('./genehists/LargestDifferenceGenes_ByCount.tsv', sep='\t', index=True)
def thresholded_value_comparison(df_hg38, df_hg19, metric='hamming'): "Compare -2,-1,0,1,2 gene-level thresholded calls. metric can be either\n hamming (number of discrepancies in each gene) or manhattan (sum of \n 'distances' between each gene so a 1 to -1 change is 2). Returns a vector\n of each gene's metric." out = [] for (i, g) in enumerate(df_hg38.columns): if (metric == 'hamming'): out.append((sum((df_hg19[g] != df_hg38[g])) / len(df_hg19))) elif (metric == 'manhattan'): out.append(sum(abs((df_hg19[g] - df_hg38[g])))) return pd.DataFrame(out, index=df_hg38.columns)
8,923,828,903,387,390,000
Compare -2,-1,0,1,2 gene-level thresholded calls. metric can be either hamming (number of discrepancies in each gene) or manhattan (sum of 'distances' between each gene so a 1 to -1 change is 2). Returns a vector of each gene's metric.
scripts/AnalysisCode.py
thresholded_value_comparison
gaog94/GDAN_QC_CopyNumber
python
def thresholded_value_comparison(df_hg38, df_hg19, metric='hamming'): "Compare -2,-1,0,1,2 gene-level thresholded calls. metric can be either\n hamming (number of discrepancies in each gene) or manhattan (sum of \n 'distances' between each gene so a 1 to -1 change is 2). Returns a vector\n of each gene's metric." out = [] for (i, g) in enumerate(df_hg38.columns): if (metric == 'hamming'): out.append((sum((df_hg19[g] != df_hg38[g])) / len(df_hg19))) elif (metric == 'manhattan'): out.append(sum(abs((df_hg19[g] - df_hg38[g])))) return pd.DataFrame(out, index=df_hg38.columns)
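A small worked example of the two metrics on thresholded calls for one gene across four samples, for illustration:

    import pandas as pd

    hg19 = pd.Series([1, 0, -1, 2])
    hg38 = pd.Series([1, 1, 1, 2])

    # hamming: fraction of samples whose call changed -> 2/4 = 0.5
    print(sum(hg19 != hg38) / len(hg19))

    # manhattan: summed magnitude of the changes -> |0-1| + |-1-1| = 3
    print(sum(abs(hg19 - hg38)))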
def sequential_cohort_test_thresholded_values(cohs): 'Compare thresholded gene-level calls for input tumor types.' df_out = pd.DataFrame([]) for coh in cohs: (df_hg38, df_hg19) = load_data(coh, thresh=True) df_results = thresholded_value_comparison(df_hg38, df_hg19, metric='hamming') df_results.columns = [coh] df_out = df_out.join(df_results, how='outer') df_out.to_csv('../readout/DiscordantSampleFractions_perGene_perCohort_thresholdedCalls.tsv', sep='\t') return df_out
2,946,206,052,754,461,000
Compare thresholded gene-level calls for input tumor types.
scripts/AnalysisCode.py
sequential_cohort_test_thresholded_values
gaog94/GDAN_QC_CopyNumber
python
def sequential_cohort_test_thresholded_values(cohs): df_out = pd.DataFrame([]) for coh in cohs: (df_hg38, df_hg19) = load_data(coh, thresh=True) df_results = thresholded_value_comparison(df_hg38, df_hg19, metric='hamming') df_results.columns = [coh] df_out = df_out.join(df_results, how='outer') df_out.to_csv('../readout/DiscordantSampleFractions_perGene_perCohort_thresholdedCalls.tsv', sep='\t') return df_out
def plot_fractionDisagreements_perCohort(cohs): 'Visualize fraction of samples with disagreements in thresholded copy \n number for each gene. Run sequential_cohort_test_thresholded_values()\n before this function.' df = sequential_cohort_test_thresholded_values(cohs) df_box = pd.melt(df.reset_index(), id_vars='Gene Symbol').set_index('Gene Symbol') df_box.columns = ['Tumor Type', 'Fraction of Samples with Disagreements'] dft = df.T dft['med_degenerates'] = df.median(axis=0) boxorder = dft.sort_values('med_degenerates', axis=0).index df_cn = pd.read_table('../../PanCanAneuploidy/bin/PANCAN_armonly_ASandpuritycalls_092817_xcellcalls.txt', index_col=0, usecols=[0, 1, 2, 16]) coh_medians = [int(np.median(df_cn[(df_cn['Type'] == x)]['RecurrentSCNA'].dropna())) for x in df_cn.Type.unique()] df_med = pd.DataFrame(coh_medians, index=df_cn.Type.unique(), columns=['med']) pal = sns.color_palette('Blues', ((max(df_med.med) - min(df_med.med)) + 1)) my_pal = {c: pal[df_med.at[(c, 'med')]] for c in df_med.index} g = sns.boxplot(x=df_box.columns[0], y=df_box.columns[1], data=df_box, order=boxorder, fliersize=1, palette=my_pal, linewidth=0.5) newxticks = [(((x + ' (') + str(df_med.loc[x]['med'])) + ')') for x in boxorder] g.set_xticklabels(newxticks, rotation=90) plt.ylabel('Fraction with Disagreements', fontsize=12) sns.despine() plt.gcf().set_size_inches((8, 3)) plt.savefig('2_thresholdedCN_boxplot.pdf', bbox_inches='tight') plt.savefig('2_thresholdedCN_boxplot.png', bbox_inches='tight')
-8,287,393,572,364,439,000
Visualize fraction of samples with disagreements in thresholded copy number for each gene. Run sequential_cohort_test_thresholded_values() before this function.
scripts/AnalysisCode.py
plot_fractionDisagreements_perCohort
gaog94/GDAN_QC_CopyNumber
python
def plot_fractionDisagreements_perCohort(cohs): 'Visualize fraction of samples with disagreements in thresholded copy \n number for each gene. Run sequential_cohort_test_thresholded_values()\n before this function.' df = sequential_cohort_test_thresholded_values(cohs) df_box = pd.melt(df.reset_index(), id_vars='Gene Symbol').set_index('Gene Symbol') df_box.columns = ['Tumor Type', 'Fraction of Samples with Disagreements'] dft = df.T dft['med_degenerates'] = df.median(axis=0) boxorder = dft.sort_values('med_degenerates', axis=0).index df_cn = pd.read_table('../../PanCanAneuploidy/bin/PANCAN_armonly_ASandpuritycalls_092817_xcellcalls.txt', index_col=0, usecols=[0, 1, 2, 16]) coh_medians = [int(np.median(df_cn[(df_cn['Type'] == x)]['RecurrentSCNA'].dropna())) for x in df_cn.Type.unique()] df_med = pd.DataFrame(coh_medians, index=df_cn.Type.unique(), columns=['med']) pal = sns.color_palette('Blues', ((max(df_med.med) - min(df_med.med)) + 1)) my_pal = {c: pal[df_med.at[(c, 'med')]] for c in df_med.index} g = sns.boxplot(x=df_box.columns[0], y=df_box.columns[1], data=df_box, order=boxorder, fliersize=1, palette=my_pal, linewidth=0.5) newxticks = [(((x + ' (') + str(df_med.loc[x]['med'])) + ')') for x in boxorder] g.set_xticklabels(newxticks, rotation=90) plt.ylabel('Fraction with Disagreements', fontsize=12) sns.despine() plt.gcf().set_size_inches((8, 3)) plt.savefig('2_thresholdedCN_boxplot.pdf', bbox_inches='tight') plt.savefig('2_thresholdedCN_boxplot.png', bbox_inches='tight')
def peakgene_overlaps(combos, same_genes, normalize=False): "Count the number of genes that overlap when examining the hg19 & hg38 \n GISTIC runs' focal peaks." (venn_numbers, gsu, gsi) = ([], [], []) for (coh, ad) in combos: print(coh) fnames = [((('../hg19_gistic/' + coh) + ad) + 'genes.conf_99.txt'), ((('../hg38_gistic/' + coh) + ad) + 'genes.txt')] df19 = pd.read_table(fnames[0], index_col=0).drop(['q value', 'residual q value', 'wide peak boundaries']) df38 = pd.read_table(fnames[1], index_col=0).drop(['q value', 'residual q value', 'wide peak boundaries']) g_38 = (set([x for col in df38.columns for x in df38[col].dropna()]) & same_genes) g_19 = (set([x for col in df19.columns for x in df19[col].dropna()]) & same_genes) (intersect, union) = ((g_38 & g_19), (g_38 | g_19)) gsu.append(union) gsi.append(intersect) if normalize: venn_numbers.append([(len((g_19 - intersect)) / len(union)), (len(intersect) / len(union)), (len((g_38 - intersect)) / len(union))]) else: venn_numbers.append([len((g_19 - intersect)), len(intersect), len((g_38 - intersect))]) index = [((x[0] + '_') + x[1][1:(- 1)]) for x in combos] return pd.DataFrame(venn_numbers, index=index, columns=['hg19 only', 'Intersection', 'hg38 only'])
2,170,390,030,271,310,600
Count the number of genes that overlap when examining the hg19 & hg38 GISTIC runs' focal peaks.
scripts/AnalysisCode.py
peakgene_overlaps
gaog94/GDAN_QC_CopyNumber
python
def peakgene_overlaps(combos, same_genes, normalize=False): "Count the number of genes that overlap when examining the hg19 & hg38 \n GISTIC runs' focal peaks." (venn_numbers, gsu, gsi) = ([], [], []) for (coh, ad) in combos: print(coh) fnames = [((('../hg19_gistic/' + coh) + ad) + 'genes.conf_99.txt'), ((('../hg38_gistic/' + coh) + ad) + 'genes.txt')] df19 = pd.read_table(fnames[0], index_col=0).drop(['q value', 'residual q value', 'wide peak boundaries']) df38 = pd.read_table(fnames[1], index_col=0).drop(['q value', 'residual q value', 'wide peak boundaries']) g_38 = (set([x for col in df38.columns for x in df38[col].dropna()]) & same_genes) g_19 = (set([x for col in df19.columns for x in df19[col].dropna()]) & same_genes) (intersect, union) = ((g_38 & g_19), (g_38 | g_19)) gsu.append(union) gsi.append(intersect) if normalize: venn_numbers.append([(len((g_19 - intersect)) / len(union)), (len(intersect) / len(union)), (len((g_38 - intersect)) / len(union))]) else: venn_numbers.append([len((g_19 - intersect)), len(intersect), len((g_38 - intersect))]) index = [((x[0] + '_') + x[1][1:(- 1)]) for x in combos] return pd.DataFrame(venn_numbers, index=index, columns=['hg19 only', 'Intersection', 'hg38 only'])
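The Venn bookkeeping inside the loop is plain set arithmetic; a toy sketch with invented gene sets:

# Invented peak-gene sets for one cohort/alteration combination.
g_19 = {'MYC', 'EGFR', 'CDKN2A'}
g_38 = {'MYC', 'EGFR', 'MDM2'}
intersect, union = (g_38 & g_19), (g_38 | g_19)
print(len(g_19 - intersect), len(intersect), len(g_38 - intersect))  # 1 2 1
# With normalize=True, each of the three counts is divided by len(union) == 4.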
def plot_peakgene_overlaps(combos, same_genes, write=False): 'Visualize the results of peakgene_overlaps function in bargraph form.' df_out = peakgene_overlaps(combos, same_genes, normalize=False) (df_d, df_a) = (df_out[(df_out.index.str.split('_').str[(- 1)] == 'del')], df_out[(df_out.index.str.split('_').str[(- 1)] == 'amp')]) for x in zip((df_d, df_a), ('Deletion Peak Memberships', 'Amplification Peak Memberships')): x[0].index = x[0].index.str.split('_').str[0] x[0].plot.bar(stacked=True, color=['#af8dc3', '#f7f7f7', '#7fbf7b'], linewidth=1, edgecolor='k') plt.gca().set_xticklabels(x[0].index, rotation=90) plt.title(x[1], fontsize=18) plt.gcf().set_size_inches(10, 8) sns.despine() plt.savefig((x[1].split(' ')[0] + '_peakMemberships.pdf'), bbox_inches='tight') plt.savefig((x[1].split(' ')[0] + '_peakMemberships.png'), bbox_inches='tight') plt.clf() if write: df_out.to_csv('VennStats_focalpeaks.tsv', sep='\t')
6,568,916,132,747,667,000
Visualize the results of peakgene_overlaps function in bargraph form.
scripts/AnalysisCode.py
plot_peakgene_overlaps
gaog94/GDAN_QC_CopyNumber
python
def plot_peakgene_overlaps(combos, same_genes, write=False): df_out = peakgene_overlaps(combos, same_genes, normalize=False) (df_d, df_a) = (df_out[(df_out.index.str.split('_').str[(- 1)] == 'del')], df_out[(df_out.index.str.split('_').str[(- 1)] == 'amp')]) for x in zip((df_d, df_a), ('Deletion Peak Memberships', 'Amplification Peak Memberships')): x[0].index = x[0].index.str.split('_').str[0] x[0].plot.bar(stacked=True, color=['#af8dc3', '#f7f7f7', '#7fbf7b'], linewidth=1, edgecolor='k') plt.gca().set_xticklabels(x[0].index, rotation=90) plt.title(x[1], fontsize=18) plt.gcf().set_size_inches(10, 8) sns.despine() plt.savefig((x[1].split(' ')[0] + '_peakMemberships.pdf'), bbox_inches='tight') plt.savefig((x[1].split(' ')[0] + '_peakMemberships.png'), bbox_inches='tight') plt.clf() if write: df_out.to_csv('VennStats_focalpeaks.tsv', sep='\t')
def documented_driver_differences(): 'Scan and analyze manually curated DocumentedDriverDifferences.txt file.\n Returns: 1) Number of driver genes called in both hg19 & hg38 GISTIC peaks\n 2) Number of drivers missing in hg38 peaks that appeared in hg19 peaks and\n 3) Number of drivers present in hg38 peaks but absent from hg19 peaks.' df = pd.read_table('../DocumentedDriverDifferences.txt', index_col=0) df['hg19?'] = df['present in hg19?'].str.strip(')').str.strip('(').str.strip('[').str.strip(']') df['hg38?'] = df['present in hg38?'].str.strip(')').str.strip('(').str.strip('[').str.strip(']') matches = sum((df['hg19?'] == df['hg38?'])) lostdrivers = len(df[((df['hg19?'] == 'yes') & (df['hg38?'] == 'no'))]) recovereddrivers = len(df[((df['hg19?'] == 'no') & (df['hg38?'] == 'yes'))]) return (matches, lostdrivers, recovereddrivers)
3,854,181,415,097,760,000
Scan and analyze manually curated DocumentedDriverDifferences.txt file. Returns: 1) Number of driver genes called in both hg19 & hg38 GISTIC peaks 2) Number of drivers missing in hg38 peaks that appeared in hg19 peaks and 3) Number of drivers present in hg38 peaks but absent from hg19 peaks.
scripts/AnalysisCode.py
documented_driver_differences
gaog94/GDAN_QC_CopyNumber
python
def documented_driver_differences(): 'Scan and analyze manually curated DocumentedDriverDifferences.txt file.\n Returns: 1) Number of driver genes called in both hg19 & hg38 GISTIC peaks\n 2) Number of drivers missing in hg38 peaks that appeared in hg19 peaks and\n 3) Number of drivers present in hg38 peaks but absent from hg19 peaks.' df = pd.read_table('../DocumentedDriverDifferences.txt', index_col=0) df['hg19?'] = df['present in hg19?'].str.strip(')').str.strip('(').str.strip('[').str.strip(']') df['hg38?'] = df['present in hg38?'].str.strip(')').str.strip('(').str.strip('[').str.strip(']') matches = sum((df['hg19?'] == df['hg38?'])) lostdrivers = len(df[((df['hg19?'] == 'yes') & (df['hg38?'] == 'no'))]) recovereddrivers = len(df[((df['hg19?'] == 'no') & (df['hg38?'] == 'yes'))]) return (matches, lostdrivers, recovereddrivers)
def read_raw_artemis123(input_fname, preload=False, verbose=None): 'Read Artemis123 data as raw object.\n\n Parameters\n ----------\n input_fname : str\n Path to the data file (extension ``.bin``). The header file with the\n same file name stem and an extension ``.txt`` is expected to be found\n in the same directory.\n preload : bool or str (default False)\n Preload data into memory for data manipulation and faster indexing.\n If True, the data will be preloaded into memory (fast, requires\n large amount of memory). If preload is a string, preload is the\n file name of a memory-mapped file which is used to store the data\n on the hard drive (slower, requires less memory).\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n\n Returns\n -------\n raw : Instance of Raw\n A Raw object containing the data.\n\n See Also\n --------\n mne.io.Raw : Documentation of attribute and methods.\n ' return RawArtemis123(input_fname, preload=preload, verbose=verbose)
1,388,420,436,905,467,000
Read Artemis123 data as raw object. Parameters ---------- input_fname : str Path to the data file (extension ``.bin``). The header file with the same file name stem and an extension ``.txt`` is expected to be found in the same directory. preload : bool or str (default False) Preload data into memory for data manipulation and faster indexing. If True, the data will be preloaded into memory (fast, requires large amount of memory). If preload is a string, preload is the file name of a memory-mapped file which is used to store the data on the hard drive (slower, requires less memory). verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- raw : Instance of Raw A Raw object containing the data. See Also -------- mne.io.Raw : Documentation of attribute and methods.
mne/io/artemis123/artemis123.py
read_raw_artemis123
mvdoc/mne-python
python
def read_raw_artemis123(input_fname, preload=False, verbose=None): 'Read Artemis123 data as raw object.\n\n Parameters\n ----------\n input_fname : str\n Path to the data file (extension ``.bin``). The header file with the\n same file name stem and an extension ``.txt`` is expected to be found\n in the same directory.\n preload : bool or str (default False)\n Preload data into memory for data manipulation and faster indexing.\n If True, the data will be preloaded into memory (fast, requires\n large amount of memory). If preload is a string, preload is the\n file name of a memory-mapped file which is used to store the data\n on the hard drive (slower, requires less memory).\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n\n Returns\n -------\n raw : Instance of Raw\n A Raw object containing the data.\n\n See Also\n --------\n mne.io.Raw : Documentation of attribute and methods.\n ' return RawArtemis123(input_fname, preload=preload, verbose=verbose)
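A minimal usage sketch; the file name is a hypothetical stand-in for a real Artemis123 recording, and the matching .txt header must sit next to the .bin file:

import mne

# Hypothetical recording; split('_')[2] of the stem parses as %Y-%m-%d-%Hh-%Mm.
raw = mne.io.read_raw_artemis123('Subject01_run1_2017-01-01-10h-30m.bin', preload=True)
print(raw.info['sfreq'], len(raw.ch_names))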
def _get_artemis123_info(fname): 'Function for extracting info from artemis123 header files.' fname = op.splitext(op.abspath(fname))[0] header = (fname + '.txt') logger.info('Reading header...') chan_keys = ['name', 'scaling', 'FLL_Gain', 'FLL_Mode', 'FLL_HighPass', 'FLL_AutoReset', 'FLL_ResetLock'] header_info = dict() header_info['filter_hist'] = [] header_info['comments'] = '' header_info['channels'] = [] with open(header, 'r') as fid: sectionFlag = 0 for line in fid: if ((not line.strip()) or ((sectionFlag == 2) and line.startswith('DAQ Map'))): continue if line.startswith('<end'): sectionFlag = 0 elif line.startswith('<start main header>'): sectionFlag = 1 elif line.startswith('<start per channel header>'): sectionFlag = 2 elif line.startswith('<start comments>'): sectionFlag = 3 elif line.startswith('<start length>'): sectionFlag = 4 elif line.startswith('<start filtering history>'): sectionFlag = 5 elif (sectionFlag == 1): values = line.strip().split('\t') if (len(values) == 1): values.append('') header_info[values[0]] = values[1] elif (sectionFlag == 2): values = line.strip().split('\t') if (len(values) != 7): raise IOError((('Error parsing line \n\t:%s\n' % line) + ('from file %s' % header))) tmp = dict() for (k, v) in zip(chan_keys, values): tmp[k] = v header_info['channels'].append(tmp) elif (sectionFlag == 3): header_info['comments'] = ('%s%s' % (header_info['comments'], line.strip())) elif (sectionFlag == 4): header_info['num_samples'] = int(line.strip()) elif (sectionFlag == 5): header_info['filter_hist'].append(line.strip()) for k in ['Temporal Filter Active?', 'Decimation Active?', 'Spatial Filter Active?']: if (header_info[k] != 'FALSE'): warn(('%s is set but is not supported' % k)) if header_info['filter_hist']: warn('Non-empty filter history found, but is not supported') info = _empty_info(float(header_info['Rate Out'])) try: date = datetime.datetime.strptime(op.basename(fname).split('_')[2], '%Y-%m-%d-%Hh-%Mm') meas_date = calendar.timegm(date.utctimetuple()) except Exception: meas_date = None subject_info = {'id': header_info['Subject ID']} desc = '' for k in ['Purpose', 'Notes']: desc += '{} : {}\n'.format(k, header_info[k]) desc += 'Comments : {}'.format(header_info['comments']) info = _empty_info(float(header_info['Rate Out'])) info.update({'filename': fname, 'meas_date': meas_date, 'description': desc, 'buffer_size_sec': 1.0, 'subject_info': subject_info, 'proj_name': header_info['Project Name']}) ref_mag_names = ['REF_001', 'REF_002', 'REF_003', 'REF_004', 'REF_005', 'REF_006'] ref_grad_names = ['REF_007', 'REF_008', 'REF_009', 'REF_010', 'REF_011', 'REF_012'] loc_dict = _load_mne_locs() info['chs'] = [] info['bads'] = [] for (i, chan) in enumerate(header_info['channels']): t = {'cal': float(chan['scaling']), 'ch_name': chan['name'], 'logno': (i + 1), 'scanno': (i + 1), 'range': 1.0, 'unit_mul': FIFF.FIFF_UNITM_NONE, 'coord_frame': FIFF.FIFFV_COORD_DEVICE} t['loc'] = loc_dict.get(chan['name'], np.zeros(12)) if chan['name'].startswith('MEG'): t['coil_type'] = FIFF.FIFFV_COIL_ARTEMIS123_GRAD t['kind'] = FIFF.FIFFV_MEG_CH t['unit'] = FIFF.FIFF_UNIT_T t['unit_mul'] = FIFF.FIFF_UNITM_F elif (chan['name'] in ref_mag_names): t['coil_type'] = FIFF.FIFFV_COIL_ARTEMIS123_REF_MAG t['kind'] = FIFF.FIFFV_REF_MEG_CH t['unit'] = FIFF.FIFF_UNIT_T t['unit_mul'] = FIFF.FIFF_UNITM_F elif (chan['name'] in ref_grad_names): t['coil_type'] = FIFF.FIFFV_COIL_ARTEMIS123_REF_GRAD t['kind'] = FIFF.FIFFV_REF_MEG_CH t['unit'] = FIFF.FIFF_UNIT_T t['unit_mul'] = FIFF.FIFF_UNITM_F elif chan['name'].startswith('REF'): t['coil_type'] = FIFF.FIFFV_COIL_NONE t['kind'] = FIFF.FIFFV_MISC_CH t['unit'] = FIFF.FIFF_UNIT_V info['bads'].append(t['ch_name']) elif chan['name'].startswith(('AUX', 'TRG', 'MIO')): t['coil_type'] = FIFF.FIFFV_COIL_NONE t['unit'] = FIFF.FIFF_UNIT_V if chan['name'].startswith('TRG'): t['kind'] = FIFF.FIFFV_STIM_CH else: t['kind'] = FIFF.FIFFV_MISC_CH else: raise ValueError(('Channel does not match expected' + (' channel Types:"%s"' % chan['name']))) t['cal'] *= (10 ** t['unit_mul']) t['unit_mul'] = FIFF.FIFF_UNITM_NONE info['chs'].append(t) if (chan['FLL_ResetLock'] == 'TRUE'): info['bads'].append(t['ch_name']) info['bads'] = list(set(info['bads'])) info._update_redundant() return (info, header_info)
6,327,057,148,829,524,000
Function for extracting info from artemis123 header files.
mne/io/artemis123/artemis123.py
_get_artemis123_info
mvdoc/mne-python
python
def _get_artemis123_info(fname): fname = op.splitext(op.abspath(fname))[0] header = (fname + '.txt') logger.info('Reading header...') chan_keys = ['name', 'scaling', 'FLL_Gain', 'FLL_Mode', 'FLL_HighPass', 'FLL_AutoReset', 'FLL_ResetLock'] header_info = dict() header_info['filter_hist'] = [] header_info['comments'] = '' header_info['channels'] = [] with open(header, 'r') as fid: sectionFlag = 0 for line in fid: if ((not line.strip()) or ((sectionFlag == 2) and line.startswith('DAQ Map'))): continue if line.startswith('<end'): sectionFlag = 0 elif line.startswith('<start main header>'): sectionFlag = 1 elif line.startswith('<start per channel header>'): sectionFlag = 2 elif line.startswith('<start comments>'): sectionFlag = 3 elif line.startswith('<start length>'): sectionFlag = 4 elif line.startswith('<start filtering history>'): sectionFlag = 5 elif (sectionFlag == 1): values = line.strip().split('\t') if (len(values) == 1): values.append('') header_info[values[0]] = values[1] elif (sectionFlag == 2): values = line.strip().split('\t') if (len(values) != 7): raise IOError((('Error parsing line \n\t:%s\n' % line) + ('from file %s' % header))) tmp = dict() for (k, v) in zip(chan_keys, values): tmp[k] = v header_info['channels'].append(tmp) elif (sectionFlag == 3): header_info['comments'] = ('%s%s' % (header_info['comments'], line.strip())) elif (sectionFlag == 4): header_info['num_samples'] = int(line.strip()) elif (sectionFlag == 5): header_info['filter_hist'].append(line.strip()) for k in ['Temporal Filter Active?', 'Decimation Active?', 'Spatial Filter Active?']: if (header_info[k] != 'FALSE'): warn(('%s is set but is not supported' % k)) if header_info['filter_hist']: warn('Non-empty filter history found, but is not supported') info = _empty_info(float(header_info['Rate Out'])) try: date = datetime.datetime.strptime(op.basename(fname).split('_')[2], '%Y-%m-%d-%Hh-%Mm') meas_date = calendar.timegm(date.utctimetuple()) except Exception: meas_date = None subject_info = {'id': header_info['Subject ID']} desc = '' for k in ['Purpose', 'Notes']: desc += '{} : {}\n'.format(k, header_info[k]) desc += 'Comments : {}'.format(header_info['comments']) info = _empty_info(float(header_info['Rate Out'])) info.update({'filename': fname, 'meas_date': meas_date, 'description': desc, 'buffer_size_sec': 1.0, 'subject_info': subject_info, 'proj_name': header_info['Project Name']}) ref_mag_names = ['REF_001', 'REF_002', 'REF_003', 'REF_004', 'REF_005', 'REF_006'] ref_grad_names = ['REF_007', 'REF_008', 'REF_009', 'REF_010', 'REF_011', 'REF_012'] loc_dict = _load_mne_locs() info['chs'] = [] info['bads'] = [] for (i, chan) in enumerate(header_info['channels']): t = {'cal': float(chan['scaling']), 'ch_name': chan['name'], 'logno': (i + 1), 'scanno': (i + 1), 'range': 1.0, 'unit_mul': FIFF.FIFF_UNITM_NONE, 'coord_frame': FIFF.FIFFV_COORD_DEVICE} t['loc'] = loc_dict.get(chan['name'], np.zeros(12)) if chan['name'].startswith('MEG'): t['coil_type'] = FIFF.FIFFV_COIL_ARTEMIS123_GRAD t['kind'] = FIFF.FIFFV_MEG_CH t['unit'] = FIFF.FIFF_UNIT_T t['unit_mul'] = FIFF.FIFF_UNITM_F elif (chan['name'] in ref_mag_names): t['coil_type'] = FIFF.FIFFV_COIL_ARTEMIS123_REF_MAG t['kind'] = FIFF.FIFFV_REF_MEG_CH t['unit'] = FIFF.FIFF_UNIT_T t['unit_mul'] = FIFF.FIFF_UNITM_F elif (chan['name'] in ref_grad_names): t['coil_type'] = FIFF.FIFFV_COIL_ARTEMIS123_REF_GRAD t['kind'] = FIFF.FIFFV_REF_MEG_CH t['unit'] = FIFF.FIFF_UNIT_T t['unit_mul'] = FIFF.FIFF_UNITM_F elif chan['name'].startswith('REF'): t['coil_type'] = FIFF.FIFFV_COIL_NONE t['kind'] = FIFF.FIFFV_MISC_CH t['unit'] = FIFF.FIFF_UNIT_V info['bads'].append(t['ch_name']) elif chan['name'].startswith(('AUX', 'TRG', 'MIO')): t['coil_type'] = FIFF.FIFFV_COIL_NONE t['unit'] = FIFF.FIFF_UNIT_V if chan['name'].startswith('TRG'): t['kind'] = FIFF.FIFFV_STIM_CH else: t['kind'] = FIFF.FIFFV_MISC_CH else: raise ValueError(('Channel does not match expected' + (' channel Types:"%s"' % chan['name']))) t['cal'] *= (10 ** t['unit_mul']) t['unit_mul'] = FIFF.FIFF_UNITM_NONE info['chs'].append(t) if (chan['FLL_ResetLock'] == 'TRUE'): info['bads'].append(t['ch_name']) info['bads'] = list(set(info['bads'])) info._update_redundant() return (info, header_info)
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): 'Read a chunk of raw data.' _read_segments_file(self, data, idx, fi, start, stop, cals, mult, dtype='>f4')
6,048,899,956,598,921,000
Read a chunk of raw data.
mne/io/artemis123/artemis123.py
_read_segment_file
mvdoc/mne-python
python
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): _read_segments_file(self, data, idx, fi, start, stop, cals, mult, dtype='>f4')
@singledispatch def das(var, level=0): 'Single dispatcher that generates the DAS response.' raise StopIteration
4,591,123,262,229,556,000
Single dispatcher that generates the DAS response.
src/pydap/responses/das.py
das
JohnMLarkin/pydap
python
@singledispatch def das(var, level=0): raise StopIteration
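Concrete handlers are attached to this dispatcher per model type; a simplified sketch of one such registration (the real pydap handlers are more involved):

from pydap.model import BaseType

@das.register(BaseType)
def _basetype(var, level=0):
    # Open the variable's attribute block, recurse into its attributes, close it.
    yield '{indent}{name} {{\n'.format(indent=(level * INDENT), name=var.name)
    for attr, values in var.attributes.items():
        for line in build_attributes(attr, values, (level + 1)):
            yield line
    yield '{indent}}}\n'.format(indent=(level * INDENT))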
def build_attributes(attr, values, level=0): 'Recursive function to build the DAS.' if isinstance(values, dict): (yield '{indent}{attr} {{\n'.format(indent=(level * INDENT), attr=attr)) for (k, v) in values.items(): for line in build_attributes(k, v, (level + 1)): (yield line) (yield '{indent}}}\n'.format(indent=(level * INDENT))) else: type = get_type(values) if (isinstance(values, string_types) or (not isinstance(values, Iterable)) or (getattr(values, 'shape', None) == ())): values = [encode(values)] else: values = map(encode, values) (yield '{indent}{type} {attr} {values};\n'.format(indent=(level * INDENT), type=type, attr=quote(attr), values=', '.join(values)))
-7,251,863,667,847,830,000
Recursive function to build the DAS.
src/pydap/responses/das.py
build_attributes
JohnMLarkin/pydap
python
def build_attributes(attr, values, level=0): if isinstance(values, dict): (yield '{indent}{attr} {{\n'.format(indent=(level * INDENT), attr=attr)) for (k, v) in values.items(): for line in build_attributes(k, v, (level + 1)): (yield line) (yield '{indent}}}\n'.format(indent=(level * INDENT))) else: type = get_type(values) if (isinstance(values, string_types) or (not isinstance(values, Iterable)) or (getattr(values, 'shape', None) == ())): values = [encode(values)] else: values = map(encode, values) (yield '{indent}{type} {attr} {values};\n'.format(indent=(level * INDENT), type=type, attr=quote(attr), values=', '.join(values)))
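Nested dicts recurse into nested attribute blocks. Roughly, for a small invented metadata dict (output shown approximately; exact spacing depends on INDENT and encode):

print(''.join(build_attributes('NC_GLOBAL', {'title': 'Test dataset', 'history': 'created 2020'})))
# NC_GLOBAL {
#     String title "Test dataset";
#     String history "created 2020";
# }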
def get_type(values): 'Extract the type of a variable.\n\n This function tries to determine the DAP type of a Python variable using\n several methods. Returns the DAP type as a string.\n\n ' if hasattr(values, 'dtype'): return NUMPY_TO_DAP2_TYPEMAP[values.dtype.char] elif (isinstance(values, string_types) or (not isinstance(values, Iterable))): return type_convert(values) else: types = [type_convert(val) for val in values] precedence = ['String', 'Float64', 'Int32'] types.sort(key=precedence.index) return types[0]
5,853,865,236,788,224,000
Extract the type of a variable. This function tries to determine the DAP type of a Python variable using several methods. Returns the DAP type as a string.
src/pydap/responses/das.py
get_type
JohnMLarkin/pydap
python
def get_type(values): 'Extract the type of a variable.\n\n This function tries to determine the DAP type of a Python variable using\n several methods. Returns the DAP type as a string.\n\n ' if hasattr(values, 'dtype'): return NUMPY_TO_DAP2_TYPEMAP[values.dtype.char] elif (isinstance(values, string_types) or (not isinstance(values, Iterable))): return type_convert(values) else: types = [type_convert(val) for val in values] precedence = ['String', 'Float64', 'Int32'] types.sort(key=precedence.index) return types[0]
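The precedence list means a mixed-type list is reported as the widest DAP type it contains, for example:

print(get_type(3.14))      # 'Float64'
print(get_type('text'))    # 'String'
print(get_type([1, 2.5]))  # 'Float64' -- Float64 outranks Int32
print(get_type([1, 'a']))  # 'String'  -- String outranks both numeric types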
def type_convert(obj): 'Map Python objects to the corresponding Opendap types.\n\n Returns the DAP representation of the type as a string.\n\n ' if isinstance(obj, float): return 'Float64' elif isinstance(obj, integer_types): return 'Int32' else: return 'String'
-4,285,503,868,923,275,000
Map Python objects to the corresponding Opendap types. Returns the DAP representation of the type as a string.
src/pydap/responses/das.py
type_convert
JohnMLarkin/pydap
python
def type_convert(obj): 'Map Python objects to the corresponding Opendap types.\n\n Returns the DAP representation of the type as a string.\n\n ' if isinstance(obj, float): return 'Float64' elif isinstance(obj, integer_types): return 'Int32' else: return 'String'
def _parse__event_type_ids(self): "turns the request's `event_type=operations__update_recents__global` into an id." event_type_id = None event_type = self.request.params.get('event_type', None) if event_type: try: event_type_id = model_utils.OperationsEventType.from_string(event_type) except AttributeError: event_type = None event_type_id = None if event_type_id: return (event_type_id,) return None
7,138,887,671,699,360,000
turns the request's `event_type=operations__update_recents__global` into an id.
src/peter_sslers/web/views_admin/operation.py
_parse__event_type_ids
jvanasco/peter_sslers
python
def _parse__event_type_ids(self): event_type_id = None event_type = self.request.params.get('event_type', None) if event_type: try: event_type_id = model_utils.OperationsEventType.from_string(event_type) except AttributeError: event_type = None event_type_id = None if event_type_id: return (event_type_id,) return None
def get_queryset(self, request): '\n Return the full queryset if the user is a superuser,\n otherwise only the objects the user created\n ' qs = super().get_queryset(request) if request.user.is_superuser: return qs return qs.filter(created_by=request.user)
-5,192,438,078,885,395,000
Return the full queryset if the user is a superuser, otherwise only the objects the user created
schedules/mixins.py
get_queryset
dvek/scyp
python
def get_queryset(self, request): '\n Return the full queryset if the user is a superuser,\n otherwise only the objects the user created\n ' qs = super().get_queryset(request) if request.user.is_superuser: return qs return qs.filter(created_by=request.user)
def response_change(self, request, obj): "\n Handle custom actions submitted from the change form,\n e.g. '_custom_action' in request.POST\n " if ('_custom_action' in request.POST): pass return super().response_change(request, obj)
5,401,525,779,719,651,000
Handle custom actions submitted from the change form, e.g. '_custom_action' in request.POST
schedules/mixins.py
response_change
dvek/scyp
python
def response_change(self, request, obj): "\n Handle custom actions submitted from the change form,\n e.g. '_custom_action' in request.POST\n " if ('_custom_action' in request.POST): pass return super().response_change(request, obj)
def response_add(self, request, obj): "\n Handle custom actions submitted from the add form,\n e.g. '_custom_action' in request.POST\n " if ('_custom_action' in request.POST): pass return super().response_add(request, obj)
-777,167,301,962,328,600
Handle custom actions submitted from the add form, e.g. '_custom_action' in request.POST
schedules/mixins.py
response_add
dvek/scyp
python
def response_add(self, request, obj): "\n Handle custom actions submitted from the add form,\n e.g. '_custom_action' in request.POST\n " if ('_custom_action' in request.POST): pass return super().response_add(request, obj)
def _split_generators(self, dl_manager: nlp.DownloadManager): 'Returns SplitGenerators.' dl_dir = dl_manager.download_and_extract(_URL) data_dir = os.path.join(dl_dir, 'ijcnlp_dailydialog') for name in ('train', 'validation', 'test'): zip_fpath = os.path.join(data_dir, f'{name}.zip') with ZipFile(zip_fpath) as zip_file: zip_file.extractall(path=data_dir) zip_file.close() return [nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={'file_path': os.path.join(data_dir, 'train', 'dialogues_train.txt'), 'act_path': os.path.join(data_dir, 'train', 'dialogues_act_train.txt'), 'emotion_path': os.path.join(data_dir, 'train', 'dialogues_emotion_train.txt'), 'split': 'train'}), nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs={'file_path': os.path.join(data_dir, 'test', 'dialogues_test.txt'), 'act_path': os.path.join(data_dir, 'test', 'dialogues_act_test.txt'), 'emotion_path': os.path.join(data_dir, 'test', 'dialogues_emotion_test.txt'), 'split': 'test'}), nlp.SplitGenerator(name=nlp.Split.VALIDATION, gen_kwargs={'file_path': os.path.join(data_dir, 'validation', 'dialogues_validation.txt'), 'act_path': os.path.join(data_dir, 'validation', 'dialogues_act_validation.txt'), 'emotion_path': os.path.join(data_dir, 'validation', 'dialogues_emotion_validation.txt'), 'split': 'dev'})]
-3,751,968,599,593,737,700
Returns SplitGenerators.
datasets/daily_dialog/daily_dialog.py
_split_generators
vinayya/nlp
python
def _split_generators(self, dl_manager: nlp.DownloadManager): dl_dir = dl_manager.download_and_extract(_URL) data_dir = os.path.join(dl_dir, 'ijcnlp_dailydialog') for name in ('train', 'validation', 'test'): zip_fpath = os.path.join(data_dir, f'{name}.zip') with ZipFile(zip_fpath) as zip_file: zip_file.extractall(path=data_dir) zip_file.close() return [nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={'file_path': os.path.join(data_dir, 'train', 'dialogues_train.txt'), 'act_path': os.path.join(data_dir, 'train', 'dialogues_act_train.txt'), 'emotion_path': os.path.join(data_dir, 'train', 'dialogues_emotion_train.txt'), 'split': 'train'}), nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs={'file_path': os.path.join(data_dir, 'test', 'dialogues_test.txt'), 'act_path': os.path.join(data_dir, 'test', 'dialogues_act_test.txt'), 'emotion_path': os.path.join(data_dir, 'test', 'dialogues_emotion_test.txt'), 'split': 'test'}), nlp.SplitGenerator(name=nlp.Split.VALIDATION, gen_kwargs={'file_path': os.path.join(data_dir, 'validation', 'dialogues_validation.txt'), 'act_path': os.path.join(data_dir, 'validation', 'dialogues_act_validation.txt'), 'emotion_path': os.path.join(data_dir, 'validation', 'dialogues_emotion_validation.txt'), 'split': 'dev'})]
def _generate_examples(self, file_path, act_path, emotion_path, split): ' Yields examples. ' with open(file_path, 'r', encoding='utf-8') as f, open(act_path, 'r', encoding='utf-8') as act, open(emotion_path, 'r', encoding='utf-8') as emotion: for (i, (line_f, line_act, line_emotion)) in enumerate(zip(f, act, emotion)): if (len(line_f.strip()) == 0): break dialog = line_f.split(self.__EOU__)[:(- 1)] act = line_act.split(' ')[:(- 1)] emotion = line_emotion.split(' ')[:(- 1)] assert (len(dialog) == len(act) == len(emotion)), 'Different turns btw dialogue & emotion & action' (yield (f'{split}-{i}', {'dialog': dialog, 'act': [act_label[x] for x in act], 'emotion': [emotion_label[x] for x in emotion]}))
3,076,678,954,509,529,600
Yields examples.
datasets/daily_dialog/daily_dialog.py
_generate_examples
vinayya/nlp
python
def _generate_examples(self, file_path, act_path, emotion_path, split): ' ' with open(file_path, 'r', encoding='utf-8') as f, open(act_path, 'r', encoding='utf-8') as act, open(emotion_path, 'r', encoding='utf-8') as emotion: for (i, (line_f, line_act, line_emotion)) in enumerate(zip(f, act, emotion)): if (len(line_f.strip()) == 0): break dialog = line_f.split(self.__EOU__)[:(- 1)] act = line_act.split(' ')[:(- 1)] emotion = line_emotion.split(' ')[:(- 1)] assert (len(dialog) == len(act) == len(emotion)), 'Different turns btw dialogue & emotion & action' (yield (f'{split}-{i}', {'dialog': dialog, 'act': [act_label[x] for x in act], 'emotion': [emotion_label[x] for x in emotion]}))
def access_log_decorate(func): '\n Decorator that records which URLs a user accesses after logging in\n :param func:\n :return:\n ' @wraps(func) def wrapper(*args, **kwargs): access_user = request.headers.get('X-Real-IP', request.remote_addr) access_method = request.method access_path = request.path access_time = timestamp_format(time.time()) resp = func(*args, **kwargs) access_result = resp[0].get('status') access_message = (resp[0].get('message', 'Internal Server Error') if resp else 'Internal Server Error') SfoServerAccessLog.add_access_log(access_user, access_method, access_path, access_time, access_result, access_message) return resp return wrapper
-7,364,709,911,023,220,000
Decorator that records which URLs a user accesses after logging in :param func: :return:
sfo_server/decorate.py
access_log_decorate
SF-Technology/SFO
python
def access_log_decorate(func): '\n Decorator that records which URLs a user accesses after logging in\n :param func:\n :return:\n ' @wraps(func) def wrapper(*args, **kwargs): access_user = request.headers.get('X-Real-IP', request.remote_addr) access_method = request.method access_path = request.path access_time = timestamp_format(time.time()) resp = func(*args, **kwargs) access_result = resp[0].get('status') access_message = (resp[0].get('message', 'Internal Server Error') if resp else 'Internal Server Error') SfoServerAccessLog.add_access_log(access_user, access_method, access_path, access_time, access_result, access_message) return resp return wrapper
def login_required(func): '\n Check whether the user is logged in\n :param func:\n :return:\n ' @wraps(func) def wrapper(*args, **kwargs): user_account = session.get('username', '') if user_account: login_user = SfoServerUser.query_user_by_account(user_account) g.user = login_user return func(*args, **kwargs) else: return ResponseBase(json.dumps({'status': 401, 'message': u'请先登录'}), status=401, content_type='application/json') return wrapper
2,283,296,594,586,760,200
Check whether the user is logged in :param func: :return:
sfo_server/decorate.py
login_required
SF-Technology/SFO
python
def login_required(func): '\n Check whether the user is logged in\n :param func:\n :return:\n ' @wraps(func) def wrapper(*args, **kwargs): user_account = session.get('username', '') if user_account: login_user = SfoServerUser.query_user_by_account(user_account) g.user = login_user return func(*args, **kwargs) else: return ResponseBase(json.dumps({'status': 401, 'message': u'请先登录'}), status=401, content_type='application/json') return wrapper
def permission_required(*resources): '\n Permission check; assumes the user is already logged in\n :param resources: the resource models whose access is controlled\n ' def decorate(func): @wraps(func) def wrapper(*args, **kwargs): method = func.__name__ resource_names = [resource.__tablename__ for resource in resources] need_permission = set([((method + '_') + resource_name) for resource_name in resource_names]) user = getattr(g, 'user', '') has_permission_set = set() is_clusteradmin = (user.is_clusteradmin if user else 0) if is_clusteradmin: return func(*args, **kwargs) if user: for role in user.roles: for permission in role.permissions: has_permission_set.add(permission.permission_name) if (not need_permission.issubset(has_permission_set)): return ResponseBase(json.dumps({'status': 403, 'message': u'权限不足,请联系管理员'}), status=403, content_type='application/json') else: return func(*args, **kwargs) else: return ResponseBase(json.dumps({'status': 401, 'message': u'请先登录'}), status=401, content_type='application/json') return wrapper return decorate
2,981,514,981,444,468,700
Permission check; assumes the user is already logged in :param resources: the resource models whose access is controlled
sfo_server/decorate.py
permission_required
SF-Technology/SFO
python
def permission_required(*resources): '\n Permission check; assumes the user is already logged in\n :param resources: the resource models whose access is controlled\n ' def decorate(func): @wraps(func) def wrapper(*args, **kwargs): method = func.__name__ resource_names = [resource.__tablename__ for resource in resources] need_permission = set([((method + '_') + resource_name) for resource_name in resource_names]) user = getattr(g, 'user', '') has_permission_set = set() is_clusteradmin = (user.is_clusteradmin if user else 0) if is_clusteradmin: return func(*args, **kwargs) if user: for role in user.roles: for permission in role.permissions: has_permission_set.add(permission.permission_name) if (not need_permission.issubset(has_permission_set)): return ResponseBase(json.dumps({'status': 403, 'message': u'权限不足,请联系管理员'}), status=403, content_type='application/json') else: return func(*args, **kwargs) else: return ResponseBase(json.dumps({'status': 401, 'message': u'请先登录'}), status=401, content_type='application/json') return wrapper return decorate
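A sketch of stacking the two decorators on a Flask view; the route, app object, and SfoClusterInfo resource model are hypothetical, chosen only to show that the required permission name is built as the view function's name + '_' + the resource's __tablename__:

@app.route('/cluster', methods=['GET'])
@login_required
@permission_required(SfoClusterInfo)  # hypothetical model with __tablename__ == 'sfo_cluster_info'
def get(*args, **kwargs):
    # Reaching this body requires the 'get_sfo_cluster_info' permission,
    # unless g.user.is_clusteradmin is set.
    ...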
@when('@:2.4.99') def patch(self): ' see https://github.com/spack/spack/issues/13559 ' filter_file('import sys', 'import sys; return "{0}"'.format(self.prefix.include), 'pybind11/__init__.py', string=True)
-4,811,059,582,925,944,000
see https://github.com/spack/spack/issues/13559
var/spack/repos/builtin/packages/py-pybind11/package.py
patch
ikitayama/spack
python
@when('@:2.4.99') def patch(self): ' ' filter_file('import sys', 'import sys; return "{0}"'.format(self.prefix.include), 'pybind11/__init__.py', string=True)
def _DashboardJsonToRawRows(dash_json_dict): "Formats a Dashboard JSON dict as a list of row dicts.\n\n For the dashboard to begin accepting the Telemetry Dashboard JSON format\n as per go/telemetry-json, this function chunks a Dashboard JSON literal\n into rows and passes the resulting list to _AddTasks.\n\n Args:\n dash_json_dict: A dashboard JSON v1.0 dict.\n\n Returns:\n A list of dicts, each of which represents a point.\n\n Raises:\n AssertionError: The given argument wasn't a dict.\n BadRequestError: The content of the input wasn't valid.\n " assert (type(dash_json_dict) is dict) if (not dash_json_dict.get('master')): raise BadRequestError('No master name given.') if (not dash_json_dict.get('bot')): raise BadRequestError('No bot name given.') if (not dash_json_dict.get('point_id')): raise BadRequestError('No point_id number given.') if (not dash_json_dict.get('chart_data')): raise BadRequestError('No chart data given.') test_suite_name = _TestSuiteName(dash_json_dict) chart_data = dash_json_dict.get('chart_data', {}) charts = chart_data.get('charts', {}) if (not charts): return [] tracing_links = None if ('trace' in charts): tracing_links = charts['trace'].copy() del charts['trace'] row_template = _MakeRowTemplate(dash_json_dict) benchmark_description = chart_data.get('benchmark_description', '') trace_rerun_options = dict(chart_data.get('trace_rerun_options', [])) is_ref = bool(dash_json_dict.get('is_ref')) rows = [] for chart in charts: for trace in charts[chart]: row = copy.deepcopy(row_template) specific_vals = _FlattenTrace(test_suite_name, chart, trace, charts[chart][trace], is_ref, tracing_links, benchmark_description) if (not (math.isnan(specific_vals['value']) or math.isnan(specific_vals['error']))): if specific_vals['tracing_uri']: row['supplemental_columns']['a_tracing_uri'] = specific_vals['tracing_uri'] if trace_rerun_options: row['supplemental_columns']['a_trace_rerun_options'] = trace_rerun_options row.update(specific_vals) rows.append(row) return rows
2,263,905,534,619,643,600
Formats a Dashboard JSON dict as a list of row dicts. For the dashboard to begin accepting the Telemetry Dashboard JSON format as per go/telemetry-json, this function chunks a Dashboard JSON literal into rows and passes the resulting list to _AddTasks. Args: dash_json_dict: A dashboard JSON v1.0 dict. Returns: A list of dicts, each of which represents a point. Raises: AssertionError: The given argument wasn't a dict. BadRequestError: The content of the input wasn't valid.
dashboard/dashboard/add_point.py
_DashboardJsonToRawRows
bopopescu/catapult-2
python
def _DashboardJsonToRawRows(dash_json_dict): "Formats a Dashboard JSON dict as a list of row dicts.\n\n For the dashboard to begin accepting the Telemetry Dashboard JSON format\n as per go/telemetry-json, this function chunks a Dashboard JSON literal\n into rows and passes the resulting list to _AddTasks.\n\n Args:\n dash_json_dict: A dashboard JSON v1.0 dict.\n\n Returns:\n A list of dicts, each of which represents a point.\n\n Raises:\n AssertionError: The given argument wasn't a dict.\n BadRequestError: The content of the input wasn't valid.\n " assert (type(dash_json_dict) is dict) if (not dash_json_dict.get('master')): raise BadRequestError('No master name given.') if (not dash_json_dict.get('bot')): raise BadRequestError('No bot name given.') if (not dash_json_dict.get('point_id')): raise BadRequestError('No point_id number given.') if (not dash_json_dict.get('chart_data')): raise BadRequestError('No chart data given.') test_suite_name = _TestSuiteName(dash_json_dict) chart_data = dash_json_dict.get('chart_data', {}) charts = chart_data.get('charts', {}) if (not charts): return [] tracing_links = None if ('trace' in charts): tracing_links = charts['trace'].copy() del charts['trace'] row_template = _MakeRowTemplate(dash_json_dict) benchmark_description = chart_data.get('benchmark_description', '') trace_rerun_options = dict(chart_data.get('trace_rerun_options', [])) is_ref = bool(dash_json_dict.get('is_ref')) rows = [] for chart in charts: for trace in charts[chart]: row = copy.deepcopy(row_template) specific_vals = _FlattenTrace(test_suite_name, chart, trace, charts[chart][trace], is_ref, tracing_links, benchmark_description) if (not (math.isnan(specific_vals['value']) or math.isnan(specific_vals['error']))): if specific_vals['tracing_uri']: row['supplemental_columns']['a_tracing_uri'] = specific_vals['tracing_uri'] if trace_rerun_options: row['supplemental_columns']['a_trace_rerun_options'] = trace_rerun_options row.update(specific_vals) rows.append(row) return rows
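A minimal, invented Dashboard JSON v1.0 payload that survives validation and yields a single row (all field values are hypothetical):

dash_json = {
    'master': 'ChromiumPerf',
    'bot': 'linux-release',
    'point_id': 12345,
    'test_suite_name': 'my_benchmark',
    'supplemental': {},
    'versions': {'chromium': 'abc123'},
    'chart_data': {
        'benchmark_name': 'my_benchmark',
        'charts': {
            'warm_times': {
                'page_1': {'type': 'scalar', 'value': 17.2, 'units': 'ms'},
            },
        },
    },
}
rows = _DashboardJsonToRawRows(dash_json)
print(rows[0]['test'], rows[0]['value'])  # my_benchmark/warm_times/page_1 17.2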
def _TestSuiteName(dash_json_dict): 'Extracts a test suite name from Dashboard JSON.\n\n The dashboard JSON may contain a field "test_suite_name". If this is not\n present or it is None, the dashboard will fall back to using "benchmark_name"\n in the "chart_data" dict.\n ' if dash_json_dict.get('test_suite_name'): return dash_json_dict['test_suite_name'] try: return dash_json_dict['chart_data']['benchmark_name'] except KeyError as e: raise BadRequestError(('Could not find test suite name. ' + e.message))
-8,499,947,017,492,508,000
Extracts a test suite name from Dashboard JSON. The dashboard JSON may contain a field "test_suite_name". If this is not present or it is None, the dashboard will fall back to using "benchmark_name" in the "chart_data" dict.
dashboard/dashboard/add_point.py
_TestSuiteName
bopopescu/catapult-2
python
def _TestSuiteName(dash_json_dict): 'Extracts a test suite name from Dashboard JSON.\n\n The dashboard JSON may contain a field "test_suite_name". If this is not\n present or it is None, the dashboard will fall back to using "benchmark_name"\n in the "chart_data" dict.\n ' if dash_json_dict.get('test_suite_name'): return dash_json_dict['test_suite_name'] try: return dash_json_dict['chart_data']['benchmark_name'] except KeyError as e: raise BadRequestError(('Could not find test suite name. ' + e.message))
def _AddTasks(data): 'Puts tasks on queue for adding data.\n\n Args:\n data: A list of dictionaries, each of which represents one point.\n ' task_list = [] for data_sublist in _Chunk(data, _TASK_QUEUE_SIZE): task_list.append(taskqueue.Task(url='/add_point_queue', params={'data': json.dumps(data_sublist)})) queue = taskqueue.Queue(_TASK_QUEUE_NAME) for task_sublist in _Chunk(task_list, taskqueue.MAX_TASKS_PER_ADD): queue.add_async(task_sublist).get_result()
-2,097,049,842,708,797,200
Puts tasks on queue for adding data. Args: data: A list of dictionaries, each of which represents one point.
dashboard/dashboard/add_point.py
_AddTasks
bopopescu/catapult-2
python
def _AddTasks(data): 'Puts tasks on queue for adding data.\n\n Args:\n data: A list of dictionaries, each of which represents one point.\n ' task_list = [] for data_sublist in _Chunk(data, _TASK_QUEUE_SIZE): task_list.append(taskqueue.Task(url='/add_point_queue', params={'data': json.dumps(data_sublist)})) queue = taskqueue.Queue(_TASK_QUEUE_NAME) for task_sublist in _Chunk(task_list, taskqueue.MAX_TASKS_PER_ADD): queue.add_async(task_sublist).get_result()
def _Chunk(items, chunk_size): 'Breaks a long list into sub-lists of a particular size.' chunks = [] for i in range(0, len(items), chunk_size): chunks.append(items[i:(i + chunk_size)]) return chunks
3,657,317,635,363,090,400
Breaks a long list into sub-lists of a particular size.
dashboard/dashboard/add_point.py
_Chunk
bopopescu/catapult-2
python
def _Chunk(items, chunk_size): chunks = [] for i in range(0, len(items), chunk_size): chunks.append(items[i:(i + chunk_size)]) return chunks
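A quick illustration of the chunking behavior:

print(_Chunk(list(range(7)), 3))  # [[0, 1, 2], [3, 4, 5], [6]]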
def _MakeRowTemplate(dash_json_dict): 'Produces a template for rows created from a Dashboard JSON v1.0 dict.\n\n _DashboardJsonToRawRows adds metadata fields to every row that it creates.\n These include things like master, bot, point ID, versions, and other\n supplementary data. This method produces a dict containing this metadata\n to which row-specific information (like value and error) can be added.\n Some metadata needs to be transformed to conform to the v0 format, and this\n method is also responsible for that transformation.\n\n Some validation is deferred until after the input is converted to a list\n of row dicts, since revision format correctness is checked on a per-point\n basis.\n\n Args:\n dash_json_dict: A dashboard JSON v1.0 dict.\n\n Returns:\n A dict containing data to include in each row dict that is created from\n |dash_json_dict|.\n ' row_template = dash_json_dict.copy() del row_template['chart_data'] del row_template['point_id'] row_template['revision'] = dash_json_dict['point_id'] annotations = row_template['supplemental'] versions = row_template['versions'] del row_template['supplemental'] del row_template['versions'] row_template['supplemental_columns'] = {} supplemental = row_template['supplemental_columns'] for annotation in annotations: supplemental[('a_' + annotation)] = annotations[annotation] for version in versions: supplemental[('r_' + version)] = versions[version] return row_template
4,264,561,190,868,986,000
Produces a template for rows created from a Dashboard JSON v1.0 dict. _DashboardJsonToRawRows adds metadata fields to every row that it creates. These include things like master, bot, point ID, versions, and other supplementary data. This method produces a dict containing this metadata to which row-specific information (like value and error) can be added. Some metadata needs to be transformed to conform to the v0 format, and this method is also responsible for that transformation. Some validation is deferred until after the input is converted to a list of row dicts, since revision format correctness is checked on a per-point basis. Args: dash_json_dict: A dashboard JSON v1.0 dict. Returns: A dict containing data to include in each row dict that is created from |dash_json_dict|.
dashboard/dashboard/add_point.py
_MakeRowTemplate
bopopescu/catapult-2
python
def _MakeRowTemplate(dash_json_dict): 'Produces a template for rows created from a Dashboard JSON v1.0 dict.\n\n _DashboardJsonToRawRows adds metadata fields to every row that it creates.\n These include things like master, bot, point ID, versions, and other\n supplementary data. This method produces a dict containing this metadata\n to which row-specific information (like value and error) can be added.\n Some metadata needs to be transformed to conform to the v0 format, and this\n method is also responsible for that transformation.\n\n Some validation is deferred until after the input is converted to a list\n of row dicts, since revision format correctness is checked on a per-point\n basis.\n\n Args:\n dash_json_dict: A dashboard JSON v1.0 dict.\n\n Returns:\n A dict containing data to include in each row dict that is created from\n |dash_json_dict|.\n ' row_template = dash_json_dict.copy() del row_template['chart_data'] del row_template['point_id'] row_template['revision'] = dash_json_dict['point_id'] annotations = row_template['supplemental'] versions = row_template['versions'] del row_template['supplemental'] del row_template['versions'] row_template['supplemental_columns'] = {} supplemental = row_template['supplemental_columns'] for annotation in annotations: supplemental[('a_' + annotation)] = annotations[annotation] for version in versions: supplemental[('r_' + version)] = versions[version] return row_template
def _FlattenTrace(test_suite_name, chart_name, trace_name, trace, is_ref=False, tracing_links=None, benchmark_description=''): "Takes a trace dict from dashboard JSON and readies it for display.\n\n Traces can be either scalars or lists; if scalar we take the value directly;\n if list we average the values and compute their standard deviation. We also\n extract fields that are normally part of v0 row dicts that are uploaded\n using add_point but are actually part of traces in the v1.0 format.\n\n Args:\n test_suite_name: The name of the test suite (benchmark).\n chart_name: The name of the chart to which this trace belongs.\n trace_name: The name of the passed trace.\n trace: A trace dict extracted from a dashboard JSON chart.\n is_ref: A boolean which indicates whether this trace comes from a\n reference build.\n tracing_links: A dictionary mapping trace names to about:tracing trace\n urls in cloud storage\n benchmark_description: A string documenting the benchmark suite to which\n this trace belongs.\n\n Returns:\n A dict containing units, value, and error for this trace.\n\n Raises:\n BadRequestError: The data wasn't valid.\n " if ('@@' in chart_name): (tir_label, chart_name) = chart_name.split('@@') chart_name = ((chart_name + '/') + tir_label) (value, error) = _ExtractValueAndError(trace) tracing_uri = None if (tracing_links and (trace_name in tracing_links) and ('cloud_url' in tracing_links[trace_name])): tracing_uri = tracing_links[trace_name]['cloud_url'].replace('\\/', '/') trace_name = _EscapeName(trace_name) if (trace_name == 'summary'): subtest_name = chart_name else: subtest_name = ((chart_name + '/') + trace_name) name = ((test_suite_name + '/') + subtest_name) if ((trace_name == 'summary') and is_ref): name += '/ref' elif ((trace_name != 'summary') and is_ref): name += '_ref' row_dict = {'test': name, 'value': value, 'error': error, 'units': trace['units'], 'tracing_uri': tracing_uri, 'benchmark_description': benchmark_description} if ('improvement_direction' in trace): improvement_direction_str = trace['improvement_direction'] if (improvement_direction_str is None): raise BadRequestError('improvement_direction must not be None') row_dict['higher_is_better'] = _ImprovementDirectionToHigherIsBetter(improvement_direction_str) return row_dict
-3,075,484,346,601,531,000
Takes a trace dict from dashboard JSON and readies it for display. Traces can be either scalars or lists; if scalar we take the value directly; if list we average the values and compute their standard deviation. We also extract fields that are normally part of v0 row dicts that are uploaded using add_point but are actually part of traces in the v1.0 format. Args: test_suite_name: The name of the test suite (benchmark). chart_name: The name of the chart to which this trace belongs. trace_name: The name of the passed trace. trace: A trace dict extracted from a dashboard JSON chart. is_ref: A boolean which indicates whether this trace comes from a reference build. tracing_links: A dictionary mapping trace names to about:tracing trace urls in cloud storage benchmark_description: A string documenting the benchmark suite to which this trace belongs. Returns: A dict containing units, value, and error for this trace. Raises: BadRequestError: The data wasn't valid.
dashboard/dashboard/add_point.py
_FlattenTrace
bopopescu/catapult-2
python
def _FlattenTrace(test_suite_name, chart_name, trace_name, trace, is_ref=False, tracing_links=None, benchmark_description=''): "Takes a trace dict from dashboard JSON and readies it for display.\n\n Traces can be either scalars or lists; if scalar we take the value directly;\n if list we average the values and compute their standard deviation. We also\n extract fields that are normally part of v0 row dicts that are uploaded\n using add_point but are actually part of traces in the v1.0 format.\n\n Args:\n test_suite_name: The name of the test suite (benchmark).\n chart_name: The name of the chart to which this trace belongs.\n trace_name: The name of the passed trace.\n trace: A trace dict extracted from a dashboard JSON chart.\n is_ref: A boolean which indicates whether this trace comes from a\n reference build.\n tracing_links: A dictionary mapping trace names to about:tracing trace\n urls in cloud storage\n benchmark_description: A string documenting the benchmark suite to which\n this trace belongs.\n\n Returns:\n A dict containing units, value, and error for this trace.\n\n Raises:\n BadRequestError: The data wasn't valid.\n " if ('@@' in chart_name): (tir_label, chart_name) = chart_name.split('@@') chart_name = ((chart_name + '/') + tir_label) (value, error) = _ExtractValueAndError(trace) tracing_uri = None if (tracing_links and (trace_name in tracing_links) and ('cloud_url' in tracing_links[trace_name])): tracing_uri = tracing_links[trace_name]['cloud_url'].replace('\\/', '/') trace_name = _EscapeName(trace_name) if (trace_name == 'summary'): subtest_name = chart_name else: subtest_name = ((chart_name + '/') + trace_name) name = ((test_suite_name + '/') + subtest_name) if ((trace_name == 'summary') and is_ref): name += '/ref' elif ((trace_name != 'summary') and is_ref): name += '_ref' row_dict = {'test': name, 'value': value, 'error': error, 'units': trace['units'], 'tracing_uri': tracing_uri, 'benchmark_description': benchmark_description} if ('improvement_direction' in trace): improvement_direction_str = trace['improvement_direction'] if (improvement_direction_str is None): raise BadRequestError('improvement_direction must not be None') row_dict['higher_is_better'] = _ImprovementDirectionToHigherIsBetter(improvement_direction_str) return row_dict
def _ExtractValueAndError(trace): 'Returns the value and measure of error from a chartjson trace dict.\n\n Args:\n trace: A dict that has one "result" from a performance test, e.g. one\n "value" in a Telemetry test, with the keys "trace_type", "value", etc.\n\n Returns:\n A pair (value, error) where |value| is a float and |error| is some measure\n of variance used to show error bars; |error| could be None.\n\n Raises:\n BadRequestError: Data format was invalid.\n ' trace_type = trace.get('type') if (trace_type == 'scalar'): value = trace.get('value') if ((value is None) and trace.get('none_value_reason')): return (float('nan'), 0) try: return (float(value), 0) except: raise BadRequestError(('Expected scalar value, got: %r' % value)) if (trace_type == 'list_of_scalar_values'): values = trace.get('values') if ((not isinstance(values, list)) and (values is not None)): raise BadRequestError(('Expected list of scalar values, got: %r' % values)) if ((not values) or (None in values)): if trace.get('none_value_reason'): return (float('nan'), float('nan')) raise BadRequestError(('Expected list of scalar values, got: %r' % values)) if (not all((_IsNumber(v) for v in values))): raise BadRequestError(('Non-number found in values list: %r' % values)) value = math_utils.Mean(values) std = trace.get('std') if (std is not None): error = std else: error = math_utils.StandardDeviation(values) return (value, error) if (trace_type == 'histogram'): return _GeomMeanAndStdDevFromHistogram(trace) raise BadRequestError(('Invalid value type in chart object: %r' % trace_type))
-1,784,660,418,561,223,700
Returns the value and measure of error from a chartjson trace dict. Args: trace: A dict that has one "result" from a performance test, e.g. one "value" in a Telemetry test, with the keys "trace_type", "value", etc. Returns: A pair (value, error) where |value| is a float and |error| is some measure of variance used to show error bars; |error| could be None. Raises: BadRequestError: Data format was invalid.
dashboard/dashboard/add_point.py
_ExtractValueAndError
bopopescu/catapult-2
python
def _ExtractValueAndError(trace): 'Returns the value and measure of error from a chartjson trace dict.\n\n Args:\n trace: A dict that has one "result" from a performance test, e.g. one\n "value" in a Telemetry test, with the keys "trace_type", "value", etc.\n\n Returns:\n A pair (value, error) where |value| is a float and |error| is some measure\n of variance used to show error bars; |error| could be None.\n\n Raises:\n BadRequestError: Data format was invalid.\n ' trace_type = trace.get('type') if (trace_type == 'scalar'): value = trace.get('value') if ((value is None) and trace.get('none_value_reason')): return (float('nan'), 0) try: return (float(value), 0) except: raise BadRequestError(('Expected scalar value, got: %r' % value)) if (trace_type == 'list_of_scalar_values'): values = trace.get('values') if ((not isinstance(values, list)) and (values is not None)): raise BadRequestError(('Expected list of scalar values, got: %r' % values)) if ((not values) or (None in values)): if trace.get('none_value_reason'): return (float('nan'), float('nan')) raise BadRequestError(('Expected list of scalar values, got: %r' % values)) if (not all((_IsNumber(v) for v in values))): raise BadRequestError(('Non-number found in values list: %r' % values)) value = math_utils.Mean(values) std = trace.get('std') if (std is not None): error = std else: error = math_utils.StandardDeviation(values) return (value, error) if (trace_type == 'histogram'): return _GeomMeanAndStdDevFromHistogram(trace) raise BadRequestError(('Invalid value type in chart object: %r' % trace_type))
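Two small invented traces exercising the scalar and list paths (the list's error term is whatever math_utils.StandardDeviation computes for the values):

print(_ExtractValueAndError({'type': 'scalar', 'value': 42}))  # (42.0, 0)
value, error = _ExtractValueAndError({'type': 'list_of_scalar_values', 'values': [2.0, 4.0, 6.0]})
print(value)  # 4.0, the mean; error is the standard deviation of the values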
def _EscapeName(name): 'Escapes a trace name so it can be stored in a row.\n\n Args:\n name: A string representing a name.\n\n Returns:\n An escaped version of the name.\n ' return re.sub('[\\:|=/#&,]', '_', name)
4,860,297,647,989,263,000
Escapes a trace name so it can be stored in a row. Args: name: A string representing a name. Returns: An escaped version of the name.
dashboard/dashboard/add_point.py
_EscapeName
bopopescu/catapult-2
python
def _EscapeName(name): return re.sub('[\\:|=/#&,]', '_', name)
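Illustratively, each character in the class — colon, pipe, equals sign, slash, hash, and ampersand, plus the comma — becomes an underscore:

assert _EscapeName('page_cycler/warm:times|t=1') == 'page_cycler_warm_times_t_1'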
def _GeomMeanAndStdDevFromHistogram(histogram): "Generates the geom. mean and std. dev. for a histogram.\n\n    A histogram is a collection of numerical buckets with associated\n    counts; a bucket can either represent a number of instances of a single\n    value ('low'), or a number of values from within a range (in which case\n    'high' specifies the upper bound). We compute the statistics by treating\n    the histogram analogously to a list of individual values, where the counts\n    tell us how many of each value there are.\n\n    Args:\n    histogram: A histogram dict with a list 'buckets' of buckets.\n\n    Returns:\n    The geometric mean and standard deviation of the given histogram.\n    " if ('buckets' not in histogram): return (0.0, 0.0) count = 0 sum_of_logs = 0 for bucket in histogram['buckets']: if ('high' in bucket): bucket['mean'] = ((bucket['low'] + bucket['high']) / 2.0) else: bucket['mean'] = bucket['low'] if (bucket['mean'] > 0): sum_of_logs += (math.log(bucket['mean']) * bucket['count']) count += bucket['count'] if (count == 0): return (0.0, 0.0) sum_of_squares = 0 geom_mean = math.exp((sum_of_logs / count)) for bucket in histogram['buckets']: if (bucket['mean'] > 0): sum_of_squares += (((bucket['mean'] - geom_mean) ** 2) * bucket['count']) return (geom_mean, math.sqrt((sum_of_squares / count)))
7,263,711,009,653,982,000
Generates the geom. mean and std. dev. for a histogram. A histogram is a collection of numerical buckets with associated counts; a bucket can either represent a number of instances of a single value ('low'), or a number of values from within a range (in which case 'high' specifies the upper bound). We compute the statistics by treating the histogram analogously to a list of individual values, where the counts tell us how many of each value there are. Args: histogram: A histogram dict with a list 'buckets' of buckets. Returns: The geometric mean and standard deviation of the given histogram.
dashboard/dashboard/add_point.py
_GeomMeanAndStdDevFromHistogram
bopopescu/catapult-2
python
def _GeomMeanAndStdDevFromHistogram(histogram): if ('buckets' not in histogram): return (0.0, 0.0) count = 0 sum_of_logs = 0 for bucket in histogram['buckets']: if ('high' in bucket): bucket['mean'] = ((bucket['low'] + bucket['high']) / 2.0) else: bucket['mean'] = bucket['low'] if (bucket['mean'] > 0): sum_of_logs += (math.log(bucket['mean']) * bucket['count']) count += bucket['count'] if (count == 0): return (0.0, 0.0) sum_of_squares = 0 geom_mean = math.exp((sum_of_logs / count)) for bucket in histogram['buckets']: if (bucket['mean'] > 0): sum_of_squares += (((bucket['mean'] - geom_mean) ** 2) * bucket['count']) return (geom_mean, math.sqrt((sum_of_squares / count)))
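A small worked example, assuming one count each in buckets at 2 and 8: the geometric mean is exp((ln 2 + ln 8) / 2) = 4, and the spread is the population standard deviation of the bucket means about that geometric mean, sqrt(((2 - 4)**2 + (8 - 4)**2) / 2) = sqrt(10):

import math

histogram = {'buckets': [{'low': 2, 'count': 1}, {'low': 8, 'count': 1}]}
geom_mean, std_dev = _GeomMeanAndStdDevFromHistogram(histogram)
assert abs(geom_mean - 4.0) < 1e-9
assert abs(std_dev - math.sqrt(10)) < 1e-9
# Note the side effect: each bucket dict now carries a computed 'mean' key.
assert histogram['buckets'][0]['mean'] == 2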
def _ImprovementDirectionToHigherIsBetter(improvement_direction_str): "Converts an improvement direction string to a higher_is_better boolean.\n\n Args:\n improvement_direction_str: a string, either 'up' or 'down'.\n\n Returns:\n A boolean expressing the appropriate higher_is_better value.\n\n Raises:\n BadRequestError: if improvement_direction_str is invalid.\n " if (improvement_direction_str == 'up'): return True elif (improvement_direction_str == 'down'): return False else: raise BadRequestError(('Invalid improvement direction string: ' + improvement_direction_str))
5,229,265,490,068,555,000
Converts an improvement direction string to a higher_is_better boolean. Args: improvement_direction_str: a string, either 'up' or 'down'. Returns: A boolean expressing the appropriate higher_is_better value. Raises: BadRequestError: if improvement_direction_str is invalid.
dashboard/dashboard/add_point.py
_ImprovementDirectionToHigherIsBetter
bopopescu/catapult-2
python
def _ImprovementDirectionToHigherIsBetter(improvement_direction_str): if (improvement_direction_str == 'up'): return True elif (improvement_direction_str == 'down'): return False else: raise BadRequestError(('Invalid improvement direction string: ' + improvement_direction_str))
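The mapping is deliberately strict, so any other string is rejected rather than guessed at:

assert _ImprovementDirectionToHigherIsBetter('up') is True
assert _ImprovementDirectionToHigherIsBetter('down') is False
# _ImprovementDirectionToHigherIsBetter('sideways')  # raises BadRequestError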
def _ConstructTestPathMap(row_dicts): 'Makes a mapping from test paths to last added revision.' last_added_revision_keys = [] for row in row_dicts: if (not (('master' in row) and ('bot' in row) and ('test' in row))): continue path = ('%s/%s/%s' % (row['master'], row['bot'], row['test'].strip('/'))) if (len(path) > _MAX_TEST_PATH_LENGTH): continue last_added_revision_keys.append(ndb.Key('LastAddedRevision', path)) try: last_added_revision_entities = ndb.get_multi(last_added_revision_keys) except datastore_errors.BadRequestError: logging.warning('Datastore BadRequestError when getting %s', repr(last_added_revision_keys)) return {} return {r.key.string_id(): r.revision for r in last_added_revision_entities if (r is not None)}
-3,003,235,259,617,169,400
Makes a mapping from test paths to last added revision.
dashboard/dashboard/add_point.py
_ConstructTestPathMap
bopopescu/catapult-2
python
def _ConstructTestPathMap(row_dicts): last_added_revision_keys = [] for row in row_dicts: if (not (('master' in row) and ('bot' in row) and ('test' in row))): continue path = ('%s/%s/%s' % (row['master'], row['bot'], row['test'].strip('/'))) if (len(path) > _MAX_TEST_PATH_LENGTH): continue last_added_revision_keys.append(ndb.Key('LastAddedRevision', path)) try: last_added_revision_entities = ndb.get_multi(last_added_revision_keys) except datastore_errors.BadRequestError: logging.warning('Datastore BadRequestError when getting %s', repr(last_added_revision_keys)) return {} return {r.key.string_id(): r.revision for r in last_added_revision_entities if (r is not None)}
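The ndb.get_multi call needs a live App Engine datastore, but the LastAddedRevision key names are plain test paths; a sketch of the path construction with hypothetical master/bot/test values:

row = {'master': 'ChromiumPerf', 'bot': 'linux-release', 'test': '/sunspider/Total/'}
path = '%s/%s/%s' % (row['master'], row['bot'], row['test'].strip('/'))
assert path == 'ChromiumPerf/linux-release/sunspider/Total'
# The returned dict maps each such path to the revision stored on its
# LastAddedRevision entity, e.g. {path: 12345}.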
def _ValidateRowDict(row, test_map): 'Checks all fields in the input dictionary.\n\n Args:\n row: A dictionary which represents one point.\n test_map: A dictionary mapping test paths to last added revision.\n\n Raises:\n BadRequestError: The input was not valid.\n ' required_fields = ['master', 'bot', 'test'] for field in required_fields: if (field not in row): raise BadRequestError(('No "%s" field in row dict.' % field)) _ValidateMasterBotTest(row['master'], row['bot'], row['test']) _ValidateRowId(row, test_map) GetAndValidateRowProperties(row)
789,831,051,503,380,000
Checks all fields in the input dictionary. Args: row: A dictionary which represents one point. test_map: A dictionary mapping test paths to last added revision. Raises: BadRequestError: The input was not valid.
dashboard/dashboard/add_point.py
_ValidateRowDict
bopopescu/catapult-2
python
def _ValidateRowDict(row, test_map): required_fields = ['master', 'bot', 'test'] for field in required_fields: if (field not in row): raise BadRequestError(('No "%s" field in row dict.' % field)) _ValidateMasterBotTest(row['master'], row['bot'], row['test']) _ValidateRowId(row, test_map) GetAndValidateRowProperties(row)
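Only the required-field check is fully visible in this record; _ValidateRowId and GetAndValidateRowProperties (defined elsewhere in the module) add further constraints. A sketch of the visible behavior with a hypothetical row:

try:
    _ValidateRowDict({'bot': 'linux-release', 'test': 'sunspider/Total'}, test_map={})
    raise AssertionError('expected BadRequestError')
except BadRequestError as e:
    assert 'No "master" field' in str(e)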
def _ValidateMasterBotTest(master, bot, test): 'Validates the master, bot, and test properties of a row dict.' test = test.strip('/') if ('/' not in test): raise BadRequestError('Test name must have more than one part.') if (len(test.split('/')) > graph_data.MAX_TEST_ANCESTORS): raise BadRequestError(('Invalid test name: %s' % test)) if (('/' in master) or ('/' in bot)): raise BadRequestError('Illegal slash in master or bot name.') _ValidateTestPath(('%s/%s/%s' % (master, bot, test)))
4,406,785,683,133,111,300
Validates the master, bot, and test properties of a row dict.
dashboard/dashboard/add_point.py
_ValidateMasterBotTest
bopopescu/catapult-2
python
def _ValidateMasterBotTest(master, bot, test): test = test.strip('/') if ('/' not in test): raise BadRequestError('Test name must have more than one part.') if (len(test.split('/')) > graph_data.MAX_TEST_ANCESTORS): raise BadRequestError(('Invalid test name: %s' % test)) if (('/' in master) or ('/' in bot)): raise BadRequestError('Illegal slash in master or bot name.') _ValidateTestPath(('%s/%s/%s' % (master, bot, test)))
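Concretely, the checks visible here reject single-part test names, overly deep test paths (bounded by graph_data.MAX_TEST_ANCESTORS, defined elsewhere), and slashes inside the master or bot name:

# _ValidateMasterBotTest('ChromiumPerf', 'linux', 'Total')
#   raises BadRequestError: Test name must have more than one part.
# _ValidateMasterBotTest('Chromium/Perf', 'linux', 'sunspider/Total')
#   raises BadRequestError: Illegal slash in master or bot name.
# A two-part test like 'sunspider/Total' then flows on to _ValidateTestPath.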
def _ValidateTestPath(test_path): 'Checks whether all the parts of the test path are valid.' if (len(test_path) > _MAX_TEST_PATH_LENGTH): raise BadRequestError(('Test path too long: %s' % test_path)) if ('*' in test_path): raise BadRequestError('Illegal asterisk in test name.') for name in test_path.split('/'): _ValidateTestPathPartName(name)
-8,827,850,601,999,613,000
Checks whether all the parts of the test path are valid.
dashboard/dashboard/add_point.py
_ValidateTestPath
bopopescu/catapult-2
python
def _ValidateTestPath(test_path): if (len(test_path) > _MAX_TEST_PATH_LENGTH): raise BadRequestError(('Test path too long: %s' % test_path)) if ('*' in test_path): raise BadRequestError('Illegal asterisk in test name.') for name in test_path.split('/'): _ValidateTestPathPartName(name)
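Finally, the per-path checks: overall length is capped by _MAX_TEST_PATH_LENGTH (defined elsewhere in the module), asterisks are banned outright, and each slash-separated part is handed to _ValidateTestPathPartName:

# _ValidateTestPath('ChromiumPerf/linux/sunspider/*')
#   raises BadRequestError: Illegal asterisk in test name.
# _ValidateTestPath('M/b/' + ('x' * _MAX_TEST_PATH_LENGTH))
#   raises BadRequestError: Test path too long: ...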